2024-11-24 02:50:56,979 main DEBUG Apache Log4j Core 2.17.2 initializing configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 02:50:56,990 main DEBUG Took 0.009425 seconds to load 1 plugins from package org.apache.hadoop.hbase.logging 2024-11-24 02:50:56,991 main DEBUG PluginManager 'Core' found 129 plugins 2024-11-24 02:50:56,991 main DEBUG PluginManager 'Level' found 0 plugins 2024-11-24 02:50:56,992 main DEBUG PluginManager 'Lookup' found 16 plugins 2024-11-24 02:50:56,993 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:56,999 main DEBUG PluginManager 'TypeConverter' found 26 plugins 2024-11-24 02:50:57,011 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.util.MBeans", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,012 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,013 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.logging.TestJul2Slf4j", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,013 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,014 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.zookeeper", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,014 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,015 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSinkAdapter", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,015 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,016 main DEBUG LoggerConfig$Builder(additivity="null", level="ERROR", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsSystemImpl", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,016 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,017 main DEBUG LoggerConfig$Builder(additivity="false", level="WARN", levelAndRefs="null", name="org.apache.directory", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,017 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,018 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.ipc.FailedServers", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,018 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 
2024-11-24 02:50:57,019 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop.metrics2.impl.MetricsConfig", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,019 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,019 main DEBUG LoggerConfig$Builder(additivity="null", level="INFO", levelAndRefs="null", name="org.apache.hadoop.hbase.ScheduledChore", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,020 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,020 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase.regionserver.RSRpcServices", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,020 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,021 main DEBUG LoggerConfig$Builder(additivity="null", level="WARN", levelAndRefs="null", name="org.apache.hadoop", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,021 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,021 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hadoop.hbase", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,022 main DEBUG Building Plugin[name=logger, class=org.apache.logging.log4j.core.config.LoggerConfig]. 2024-11-24 02:50:57,022 main DEBUG LoggerConfig$Builder(additivity="null", level="DEBUG", levelAndRefs="null", name="org.apache.hbase.thirdparty.io.netty.channel", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,022 main DEBUG Building Plugin[name=root, class=org.apache.logging.log4j.core.config.LoggerConfig$RootLogger]. 2024-11-24 02:50:57,024 main DEBUG LoggerConfig$RootLogger$Builder(additivity="null", level="null", levelAndRefs="INFO,Console", includeLocation="null", ={}, ={}, Configuration(PropertiesConfig), Filter=null) 2024-11-24 02:50:57,025 main DEBUG Building Plugin[name=loggers, class=org.apache.logging.log4j.core.config.LoggersPlugin]. 2024-11-24 02:50:57,027 main DEBUG createLoggers(={org.apache.hadoop.metrics2.util.MBeans, org.apache.hadoop.hbase.logging.TestJul2Slf4j, org.apache.zookeeper, org.apache.hadoop.metrics2.impl.MetricsSinkAdapter, org.apache.hadoop.metrics2.impl.MetricsSystemImpl, org.apache.directory, org.apache.hadoop.hbase.ipc.FailedServers, org.apache.hadoop.metrics2.impl.MetricsConfig, org.apache.hadoop.hbase.ScheduledChore, org.apache.hadoop.hbase.regionserver.RSRpcServices, org.apache.hadoop, org.apache.hadoop.hbase, org.apache.hbase.thirdparty.io.netty.channel, root}) 2024-11-24 02:50:57,027 main DEBUG Building Plugin[name=layout, class=org.apache.logging.log4j.core.layout.PatternLayout]. 
2024-11-24 02:50:57,028 main DEBUG PatternLayout$Builder(pattern="%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n", PatternSelector=null, Configuration(PropertiesConfig), Replace=null, charset="null", alwaysWriteExceptions="null", disableAnsi="null", noConsoleNoAnsi="null", header="null", footer="null") 2024-11-24 02:50:57,029 main DEBUG PluginManager 'Converter' found 47 plugins 2024-11-24 02:50:57,036 main DEBUG Building Plugin[name=appender, class=org.apache.hadoop.hbase.logging.HBaseTestAppender]. 2024-11-24 02:50:57,039 main DEBUG HBaseTestAppender$Builder(target="SYSTEM_ERR", maxSize="1G", bufferedIo="null", bufferSize="null", immediateFlush="null", ignoreExceptions="null", PatternLayout(%d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n), name="Console", Configuration(PropertiesConfig), Filter=null, ={}) 2024-11-24 02:50:57,041 main DEBUG Starting HBaseTestOutputStreamManager SYSTEM_ERR 2024-11-24 02:50:57,041 main DEBUG Building Plugin[name=appenders, class=org.apache.logging.log4j.core.config.AppendersPlugin]. 2024-11-24 02:50:57,042 main DEBUG createAppenders(={Console}) 2024-11-24 02:50:57,042 main DEBUG Configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca initialized 2024-11-24 02:50:57,043 main DEBUG Starting configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca 2024-11-24 02:50:57,043 main DEBUG Started configuration org.apache.logging.log4j.core.config.properties.PropertiesConfiguration@30f842ca OK. 2024-11-24 02:50:57,044 main DEBUG Shutting down OutputStreamManager SYSTEM_OUT.false.false-1 2024-11-24 02:50:57,044 main DEBUG OutputStream closed 2024-11-24 02:50:57,045 main DEBUG Shut down OutputStreamManager SYSTEM_OUT.false.false-1, all resources released: true 2024-11-24 02:50:57,045 main DEBUG Appender DefaultConsole-1 stopped with status true 2024-11-24 02:50:57,045 main DEBUG Stopped org.apache.logging.log4j.core.config.DefaultConfiguration@6404f418 OK 2024-11-24 02:50:57,133 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6 2024-11-24 02:50:57,135 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=StatusLogger 2024-11-24 02:50:57,136 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=ContextSelector 2024-11-24 02:50:57,138 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name= 2024-11-24 02:50:57,138 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.directory 2024-11-24 02:50:57,139 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSinkAdapter 2024-11-24 02:50:57,139 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.zookeeper 2024-11-24 02:50:57,140 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.logging.TestJul2Slf4j 2024-11-24 02:50:57,140 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsSystemImpl 2024-11-24 02:50:57,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.util.MBeans 2024-11-24 02:50:57,141 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase 2024-11-24 02:50:57,141 main DEBUG Registering 
MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop 2024-11-24 02:50:57,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ipc.FailedServers 2024-11-24 02:50:57,142 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.regionserver.RSRpcServices 2024-11-24 02:50:57,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.metrics2.impl.MetricsConfig 2024-11-24 02:50:57,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hbase.thirdparty.io.netty.channel 2024-11-24 02:50:57,143 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Loggers,name=org.apache.hadoop.hbase.ScheduledChore 2024-11-24 02:50:57,144 main DEBUG Registering MBean org.apache.logging.log4j2:type=1dbd16a6,component=Appenders,name=Console 2024-11-24 02:50:57,146 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 2024-11-24 02:50:57,147 main DEBUG Reconfiguration complete for context[name=1dbd16a6] at URI jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-logging/target/hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar!/log4j2.properties (org.apache.logging.log4j.core.LoggerContext@6dab9b6d) with optional ClassLoader: null 2024-11-24 02:50:57,147 main DEBUG Shutdown hook enabled. Registering a new one. 2024-11-24 02:50:57,148 main DEBUG LoggerContext[name=1dbd16a6, org.apache.logging.log4j.core.LoggerContext@6dab9b6d] started OK. 2024-11-24T02:50:57,390 DEBUG [main {}] hbase.HBaseTestingUtil(323): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c 2024-11-24 02:50:57,392 main DEBUG AsyncLogger.ThreadNameStrategy=UNCACHED (user specified null, default is UNCACHED) 2024-11-24 02:50:57,393 main DEBUG org.apache.logging.log4j.core.util.SystemClock supports precise timestamps. 
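For reference, the reconfiguration above is loaded from the log4j2.properties inside hbase-logging-4.0.0-alpha-1-SNAPSHOT-tests.jar. A minimal properties-format sketch that would produce the same appender, layout and levels is shown below; the property key names are illustrative assumptions, while the values (SYSTEM_ERR target, 1G maxSize, the ISO8601 pattern, the INFO,Console root and the per-package levels) are taken from the DEBUG entries above.

# Sketch only: key names are assumed; values mirror the builder output logged above.
appender.console.type = HBaseTestAppender
appender.console.name = Console
appender.console.target = SYSTEM_ERR
appender.console.maxSize = 1G
appender.console.layout.type = PatternLayout
appender.console.layout.pattern = %d{ISO8601} %-5p [%t%notEmpty{ %X}] %C{2}(%L): %m%n

rootLogger = INFO,Console

# A few of the per-package levels seen in the LoggerConfig$Builder entries above.
logger.zookeeper.name = org.apache.zookeeper
logger.zookeeper.level = ERROR
logger.hadoop.name = org.apache.hadoop
logger.hadoop.level = WARN
logger.hbase.name = org.apache.hadoop.hbase
logger.hbase.level = DEBUG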
2024-11-24T02:50:57,401 INFO [main {}] hbase.HBaseClassTestRule(94): Test class org.apache.hadoop.hbase.regionserver.wal.TestLogRolling timeout: 13 mins 2024-11-24T02:50:57,436 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=12, OpenFileDescriptor=287, MaxFileDescriptor=1048576, SystemLoadAverage=288, ProcessCount=11, AvailableMemoryMB=11456 2024-11-24T02:50:57,439 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:50:57,459 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26, deleteOnExit=true 2024-11-24T02:50:57,460 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:50:57,461 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/test.cache.data in system properties and HBase conf 2024-11-24T02:50:57,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:50:57,462 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir in system properties and HBase conf 2024-11-24T02:50:57,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T02:50:57,463 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:50:57,464 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:50:57,541 WARN [Time-limited test {}] util.NativeCodeLoader(60): Unable to load native-hadoop library for your platform... using builtin-java classes where applicable 2024-11-24T02:50:57,626 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T02:50:57,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:50:57,630 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:50:57,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:50:57,631 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:50:57,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:50:57,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:50:57,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:50:57,633 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:50:57,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T02:50:57,634 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:50:57,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:50:57,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:50:57,635 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:50:57,636 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:50:58,126 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:50:58,882 WARN [Time-limited test {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-namenode.properties,hadoop-metrics2.properties 2024-11-24T02:50:58,999 INFO [Time-limited test {}] log.Log(170): Logging initialized @2738ms to org.eclipse.jetty.util.log.Slf4jLog 2024-11-24T02:50:59,128 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:50:59,244 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:50:59,265 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:50:59,266 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:50:59,268 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:50:59,284 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:50:59,289 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:50:59,290 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:50:59,527 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@735fa16a{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/java.io.tmpdir/jetty-localhost-44905-hadoop-hdfs-3_4_1-tests_jar-_-any-14990779440658244191/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:50:59,536 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:44905} 2024-11-24T02:50:59,537 INFO [Time-limited test {}] server.Server(415): Started @3278ms 2024-11-24T02:50:59,575 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:51:00,158 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:51:00,166 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:51:00,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:51:00,167 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:51:00,167 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:51:00,168 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:51:00,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:51:00,300 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b07d1ba{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/java.io.tmpdir/jetty-localhost-35589-hadoop-hdfs-3_4_1-tests_jar-_-any-3029427033461398617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:51:00,301 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:35589} 2024-11-24T02:51:00,302 INFO [Time-limited test {}] server.Server(415): Started @4043ms 2024-11-24T02:51:00,368 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:51:00,493 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:51:00,498 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:51:00,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:51:00,508 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:51:00,508 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:51:00,512 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:51:00,513 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:51:00,645 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1bf97579{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/java.io.tmpdir/jetty-localhost-43567-hadoop-hdfs-3_4_1-tests_jar-_-any-2354237397582347135/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:51:00,646 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:43567} 2024-11-24T02:51:00,646 INFO [Time-limited test {}] server.Server(415): Started @4387ms 2024-11-24T02:51:00,649 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 
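At this point HDFS is up (one NameNode and two DataNode Jetty servers on localhost). The StartMiniClusterOption logged earlier (numMasters=1, numRegionServers=1, numDataNodes=2, numZkServers=1) corresponds to a test setup roughly like the sketch below; the builder method names are assumptions inferred from the option fields printed in the log, and this is not the actual TestLogRolling code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.apache.hadoop.hbase.StartMiniClusterOption;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();
    // Mirrors the option toString() in the log: 1 master, 1 region server,
    // 2 data nodes, 1 ZooKeeper server, no pre-created root/WAL dirs.
    StartMiniClusterOption option = StartMiniClusterOption.builder()
        .numMasters(1)
        .numRegionServers(1)
        .numDataNodes(2)
        .numZkServers(1)
        .build();
    util.startMiniCluster(option);   // brings up DFS, ZooKeeper and HBase as seen above
    try {
      // ... run test logic against the cluster ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}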
2024-11-24T02:51:02,026 WARN [Thread-96 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data3/current/BP-2137382314-172.17.0.2-1732416658234/current, will proceed with Du for space computation calculation, 2024-11-24T02:51:02,026 WARN [Thread-97 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data4/current/BP-2137382314-172.17.0.2-1732416658234/current, will proceed with Du for space computation calculation, 2024-11-24T02:51:02,053 WARN [Thread-104 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data1/current/BP-2137382314-172.17.0.2-1732416658234/current, will proceed with Du for space computation calculation, 2024-11-24T02:51:02,053 WARN [Thread-105 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data2/current/BP-2137382314-172.17.0.2-1732416658234/current, will proceed with Du for space computation calculation, 2024-11-24T02:51:02,055 WARN [Thread-81 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:51:02,083 WARN [Thread-58 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:51:02,116 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56720783ea4fae46 with lease ID 0x3238d059bb207dee: Processing first storage report for DS-56040c61-e91e-408b-8a45-7972f0254e98 from datanode DatanodeRegistration(127.0.0.1:36823, datanodeUuid=c36d2f08-adc1-4f3c-9641-e7de8fcb0d61, infoPort=35599, infoSecurePort=0, ipcPort=34887, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234) 2024-11-24T02:51:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56720783ea4fae46 with lease ID 0x3238d059bb207dee: from storage DS-56040c61-e91e-408b-8a45-7972f0254e98 node DatanodeRegistration(127.0.0.1:36823, datanodeUuid=c36d2f08-adc1-4f3c-9641-e7de8fcb0d61, infoPort=35599, infoSecurePort=0, ipcPort=34887, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:51:02,118 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9fc44cae16014d4a with lease ID 0x3238d059bb207def: Processing first storage report for DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6 from datanode DatanodeRegistration(127.0.0.1:38253, datanodeUuid=990f560f-2fbc-4359-b95c-6a6f9aa695f4, infoPort=36079, infoSecurePort=0, ipcPort=40521, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234) 2024-11-24T02:51:02,119 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9fc44cae16014d4a with lease ID 0x3238d059bb207def: from storage DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6 node DatanodeRegistration(127.0.0.1:38253, datanodeUuid=990f560f-2fbc-4359-b95c-6a6f9aa695f4, infoPort=36079, infoSecurePort=0, ipcPort=40521, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:51:02,119 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x56720783ea4fae46 with lease ID 0x3238d059bb207dee: Processing first storage report for DS-e71282f0-fc72-4763-be1d-33f5b95fdb4d from datanode DatanodeRegistration(127.0.0.1:36823, datanodeUuid=c36d2f08-adc1-4f3c-9641-e7de8fcb0d61, infoPort=35599, infoSecurePort=0, ipcPort=34887, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234) 2024-11-24T02:51:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x56720783ea4fae46 with lease ID 0x3238d059bb207dee: from storage DS-e71282f0-fc72-4763-be1d-33f5b95fdb4d node DatanodeRegistration(127.0.0.1:36823, datanodeUuid=c36d2f08-adc1-4f3c-9641-e7de8fcb0d61, infoPort=35599, infoSecurePort=0, ipcPort=34887, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:51:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9fc44cae16014d4a with lease ID 0x3238d059bb207def: Processing first storage report for DS-e7e2e96d-0040-4643-80dd-b66e1c27b13a from datanode DatanodeRegistration(127.0.0.1:38253, datanodeUuid=990f560f-2fbc-4359-b95c-6a6f9aa695f4, infoPort=36079, infoSecurePort=0, ipcPort=40521, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234) 2024-11-24T02:51:02,120 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 
0x9fc44cae16014d4a with lease ID 0x3238d059bb207def: from storage DS-e7e2e96d-0040-4643-80dd-b66e1c27b13a node DatanodeRegistration(127.0.0.1:38253, datanodeUuid=990f560f-2fbc-4359-b95c-6a6f9aa695f4, infoPort=36079, infoSecurePort=0, ipcPort=40521, storageInfo=lv=-57;cid=testClusterID;nsid=1778720313;c=1732416658234), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:51:02,154 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c 2024-11-24T02:51:02,226 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/zookeeper_0, clientPort=49774, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:51:02,234 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=49774 2024-11-24T02:51:02,247 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:02,250 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:02,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:51:02,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:51:02,888 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f with version=8 2024-11-24T02:51:02,888 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1139): Setting hbase.fs.tmp.dir to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:51:02,965 DEBUG [Time-limited test {}] channel.MultithreadEventLoopGroup(44): -Dio.netty.eventLoopThreads: 16 2024-11-24T02:51:03,166 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:51:03,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:51:03,175 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:51:03,181 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:51:03,181 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:51:03,181 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:51:03,331 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:51:03,410 INFO [Time-limited test {}] metrics.MetricRegistriesLoader(60): Loaded MetricRegistries class org.apache.hadoop.hbase.metrics.impl.MetricRegistriesImpl 2024-11-24T02:51:03,422 DEBUG [Time-limited test {}] util.ClassSize(228): Using Unsafe to estimate memory layout 2024-11-24T02:51:03,427 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:51:03,458 DEBUG [Time-limited test {}] channel.DefaultChannelId(84): -Dio.netty.processId: 60277 (auto-detected) 2024-11-24T02:51:03,460 DEBUG [Time-limited test {}] channel.DefaultChannelId(106): -Dio.netty.machineId: 02:42:ac:ff:fe:11:00:02 (auto-detected) 2024-11-24T02:51:03,483 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33511 2024-11-24T02:51:03,509 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33511 connecting to ZooKeeper ensemble=127.0.0.1:49774 2024-11-24T02:51:03,604 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335110x0, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:51:03,607 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33511-0x1016ac0666c0000 connected 2024-11-24T02:51:04,165 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:04,169 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:04,178 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:51:04,181 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f, hbase.cluster.distributed=false 2024-11-24T02:51:04,202 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:51:04,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33511 
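The RpcExecutor entries above (handlerCount=3, maxQueueLength=30, and a priority queue split into one read and one write queue) reflect the harness's reduced handler settings. The sketch below shows configuration keys that commonly map to these numbers; treating exactly these keys as the source of the logged values is an assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

class RpcHandlerConfigSketch {
  static Configuration smallTestRpcConf() {
    Configuration conf = HBaseConfiguration.create();
    // Assumed mapping to the executor sizes logged above:
    conf.setInt("hbase.regionserver.handler.count", 3);           // default.FPBQ.Fifo handlerCount=3
    conf.setInt("hbase.ipc.server.max.callqueue.length", 30);     // maxQueueLength=30
    conf.setFloat("hbase.ipc.server.callqueue.read.ratio", 0.5f); // 1 write + 1 read priority queue
    return conf;
  }
}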
2024-11-24T02:51:04,206 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33511 2024-11-24T02:51:04,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33511 2024-11-24T02:51:04,207 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33511 2024-11-24T02:51:04,208 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33511 2024-11-24T02:51:04,315 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:51:04,317 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:51:04,317 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:51:04,318 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:51:04,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:51:04,318 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:51:04,321 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:51:04,323 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:51:04,325 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:43175 2024-11-24T02:51:04,327 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:43175 connecting to ZooKeeper ensemble=127.0.0.1:49774 2024-11-24T02:51:04,328 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:04,334 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:04,598 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:431750x0, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:51:04,599 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:431750x0, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:51:04,600 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): 
regionserver:43175-0x1016ac0666c0001 connected 2024-11-24T02:51:04,604 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:51:04,613 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:51:04,616 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:51:04,622 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:51:04,622 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=43175 2024-11-24T02:51:04,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=43175 2024-11-24T02:51:04,623 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=43175 2024-11-24T02:51:04,624 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=43175 2024-11-24T02:51:04,625 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=43175 2024-11-24T02:51:04,640 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:33511 2024-11-24T02:51:04,641 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:05,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:51:05,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:51:05,005 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:05,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:51:05,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:05,221 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:05,223 DEBUG 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:51:05,226 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,33511,1732416663011 from backup master directory 2024-11-24T02:51:05,380 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:05,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:51:05,381 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:51:05,381 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:51:05,382 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:05,384 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating data MemStoreChunkPool with chunk size 2 MB, max count 396, initial count 0 2024-11-24T02:51:05,385 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.ChunkCreator(488): Allocating index MemStoreChunkPool with chunk size 204.80 KB, max count 440, initial count 0 2024-11-24T02:51:05,437 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase.id] with ID: 7210ffa8-c00e-4d65-a308-603738a634e7 2024-11-24T02:51:05,437 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/.tmp/hbase.id 2024-11-24T02:51:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:51:05,448 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:51:05,449 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/.tmp/hbase.id]:[hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase.id] 2024-11-24T02:51:05,490 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:05,496 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching 
table descriptors from the filesystem. 2024-11-24T02:51:05,517 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 18ms. 2024-11-24T02:51:05,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:05,760 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:05,776 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:51:05,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:51:05,795 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:51:05,798 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:51:05,805 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:51:05,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:51:05,841 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:51:05,858 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', 
{TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store 2024-11-24T02:51:05,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:51:05,878 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:51:05,883 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] throttle.StoreHotnessProtector(112): StoreHotnessProtector is disabled. Set hbase.region.store.parallel.put.limit > 0 to enable, which may help mitigate load under heavy write pressure. 2024-11-24T02:51:05,885 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:51:05,886 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:51:05,886 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:51:05,887 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:51:05,888 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:51:05,888 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:51:05,888 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
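The 'master:store' table descriptor printed above uses four column families (info, proc, rs, state). Purely as an illustration of the same settings expressed through the public builder API, and not how MasterRegion actually constructs it, the info and proc families would look roughly like this:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

class MasterStoreDescriptorSketch {
  static TableDescriptor sketch() {
    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master", "store"))
        // 'info': 3 versions, in-memory, ROW_INDEX_V1 encoding, ROWCOL bloom, 8 KB blocks
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setInMemory(true)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setBlocksize(8 * 1024)
            .build())
        // 'proc': single version, ROW bloom, default 64 KB blocks (the 'rs' and 'state'
        // families in the log use the same settings)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .build())
        .build();
  }
}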
2024-11-24T02:51:05,889 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416665886Disabling compacts and flushes for region at 1732416665886Disabling writes for close at 1732416665888 (+2 ms)Writing region close event to WAL at 1732416665888Closed at 1732416665888 2024-11-24T02:51:05,891 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/.initializing 2024-11-24T02:51:05,891 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/WALs/7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:05,916 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C33511%2C1732416663011, suffix=, logDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/WALs/7c69a60bd8f6,33511,1732416663011, archiveDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/oldWALs, maxLogs=10 2024-11-24T02:51:05,926 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C33511%2C1732416663011.1732416665921 2024-11-24T02:51:05,962 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/WALs/7c69a60bd8f6,33511,1732416663011/7c69a60bd8f6%2C33511%2C1732416663011.1732416665921 2024-11-24T02:51:05,973 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36079:36079),(127.0.0.1/127.0.0.1:35599:35599)] 2024-11-24T02:51:05,994 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:51:05,994 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:51:05,999 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,001 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,047 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,079 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:51:06,084 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:06,087 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:06,088 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,091 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:51:06,091 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:06,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:51:06,093 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,096 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:51:06,097 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:06,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:51:06,098 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,101 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:51:06,101 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:06,103 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:51:06,103 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,107 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,108 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,113 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,113 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,116 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:51:06,120 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:51:06,124 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:51:06,125 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=696628, jitterRate=-0.11419278383255005}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:51:06,132 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416666018Initializing all the Stores at 1732416666020 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416666021 (+1 ms)Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416666022 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416666022Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416666022Cleaning up temporary data from old regions at 1732416666113 (+91 ms)Region opened successfully at 1732416666131 (+18 ms) 2024-11-24T02:51:06,133 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:51:06,165 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@12ad2fd6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, 
connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:51:06,195 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:51:06,207 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:51:06,207 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:51:06,210 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:51:06,212 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 1 msec 2024-11-24T02:51:06,218 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 5 msec 2024-11-24T02:51:06,218 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:51:06,246 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T02:51:06,256 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:51:06,283 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:51:06,286 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:51:06,288 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:51:06,374 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:51:06,381 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:51:06,387 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:51:06,451 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:51:06,453 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:51:06,462 
DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:51:06,484 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:51:06,493 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:51:06,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:51:06,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:51:06,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:06,504 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:06,507 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,33511,1732416663011, sessionid=0x1016ac0666c0000, setting cluster-up flag (Was=false) 2024-11-24T02:51:06,535 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:06,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:06,567 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:51:06,570 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:06,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:06,588 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:06,739 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:51:06,741 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:06,747 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:51:06,833 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(746): ClusterId : 7210ffa8-c00e-4d65-a308-603738a634e7 2024-11-24T02:51:06,835 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:51:07,040 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData-prefix:7c69a60bd8f6,33511,1732416663011 {}] wal.AbstractFSWAL(1368): Slow sync cost: 231 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:07,045 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:51:07,054 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:51:07,059 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
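The StochasticLoadBalancer entry above reports maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800 and maxRunningTime=30000 ms. A minimal sketch of how such values are usually supplied, assuming the commonly documented hbase.master.balancer.stochastic.* property names (they should be verified against the HBase version actually in use):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class BalancerTuningSketch {
      public static Configuration balancerTuning() {
        Configuration conf = HBaseConfiguration.create();
        // Values mirror the "Loaded config" line in the log above.
        conf.setLong("hbase.master.balancer.stochastic.maxSteps", 1_000_000L);
        conf.setLong("hbase.master.balancer.stochastic.stepsPerRegion", 800L);
        conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 30_000L);
        conf.setBoolean("hbase.master.balancer.stochastic.runMaxSteps", false);
        return conf;
      }
    }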
2024-11-24T02:51:07,064 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,33511,1732416663011 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:51:07,148 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:51:07,148 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:51:07,148 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:51:07,148 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:51:07,149 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:51:07,149 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:51:07,149 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:51:07,149 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,149 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:51:07,149 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,151 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416697150 2024-11-24T02:51:07,153 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:51:07,154 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:51:07,154 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:51:07,155 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:51:07,158 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:51:07,159 INFO 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:51:07,159 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:51:07,159 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:51:07,160 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:07,160 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:51:07,160 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
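The freshly written hbase:meta descriptor above registers coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|'. For an ordinary user table, attaching a coprocessor to a descriptor through the public client API could look like the sketch below; the table name 'demo_table' and family 'info' are hypothetical, while the coprocessor class and priority are the values shown in the log.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.CoprocessorDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      public static TableDescriptor build() throws java.io.IOException {
        return TableDescriptorBuilder.newBuilder(TableName.valueOf("demo_table"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor(CoprocessorDescriptorBuilder
                .newBuilder("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
                .setPriority(536870911)   // priority value taken from the descriptor dump above
                .build())
            .build();
      }
    }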
2024-11-24T02:51:07,164 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:51:07,165 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:51:07,165 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:51:07,167 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:51:07,167 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:51:07,169 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416667168,5,FailOnTimeoutGroup] 2024-11-24T02:51:07,170 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416667169,5,FailOnTimeoutGroup] 2024-11-24T02:51:07,170 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,170 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:51:07,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:51:07,171 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:51:07,171 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,172 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
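The master wires up its cleaner chores here: a log cleaner pool of size 1 running TimeToLiveLogCleaner and ReplicationLogCleaner, and an hfile cleaner pool of size 2 running TimeToLiveHFileCleaner, HFileLinkCleaner and SnapshotHFileCleaner, each scheduled every 600000 ms. A rough configuration sketch, assuming the usual hbase.master.*cleaner.* property names (double-check them against the running version):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CleanerChoreSketch {
      public static Configuration cleanerTuning() {
        Configuration conf = HBaseConfiguration.create();
        // TTL (ms) that TimeToLiveLogCleaner applies to archived WALs before deleting them.
        conf.setLong("hbase.master.logcleaner.ttl", 600_000L);
        // Plugin lists decide which cleaner delegates run inside the chores shown in the log.
        conf.set("hbase.master.logcleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner");
        conf.set("hbase.master.hfilecleaner.plugins",
            "org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner,"
                + "org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner");
        return conf;
      }
    }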
2024-11-24T02:51:07,173 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:51:07,173 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f 2024-11-24T02:51:07,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:51:07,182 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:51:07,184 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:51:07,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:51:07,188 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction 
window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:51:07,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:07,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:07,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:51:07,192 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:51:07,193 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:07,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:07,194 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:51:07,197 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:51:07,197 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:07,198 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:07,198 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:51:07,201 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:51:07,201 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:07,202 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:07,202 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:51:07,203 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740 2024-11-24T02:51:07,204 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740 2024-11-24T02:51:07,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:51:07,207 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:51:07,208 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
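The CompactionConfiguration lines repeated above for every store all report the same tuning: minCompactSize 128 MB, 3 to 10 files per compaction, ratio 1.2 (5.0 off-peak), and a weekly major compaction period with 0.5 jitter. As a non-authoritative sketch, these numbers normally correspond to the hbase.hstore.compaction.* and hbase.hregion.majorcompaction* properties; the key names below are the commonly documented ones and should be confirmed for the deployed version.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static Configuration compactionTuning() {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);         // minFilesToCompact in the log
        conf.setInt("hbase.hstore.compaction.max", 10);        // maxFilesToCompact in the log
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);  // selection ratio
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        conf.setLong("hbase.hregion.majorcompaction", 7L * 24 * 60 * 60 * 1000); // 604800000 ms
        conf.setFloat("hbase.hregion.majorcompaction.jitter", 0.5f);
        return conf;
      }
    }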
2024-11-24T02:51:07,211 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:51:07,216 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:51:07,217 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=866783, jitterRate=0.10217230021953583}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:51:07,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416667184Initializing all the Stores at 1732416667186 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416667186Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416667186Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416667186Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416667186Cleaning up temporary data from old regions at 1732416667207 (+21 ms)Region opened successfully at 1732416667221 (+14 ms) 2024-11-24T02:51:07,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:51:07,222 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:51:07,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:51:07,222 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:51:07,223 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:51:07,224 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:51:07,224 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416667222Disabling compacts and flushes for region at 1732416667222Disabling writes for close at 1732416667222Writing region 
close event to WAL at 1732416667224 (+2 ms)Closed at 1732416667224 2024-11-24T02:51:07,227 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:51:07,228 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:51:07,233 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:51:07,240 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:51:07,243 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:51:07,246 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:51:07,247 DEBUG [RS:0;7c69a60bd8f6:43175 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd27d49, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:51:07,266 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:43175 2024-11-24T02:51:07,270 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:51:07,270 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:51:07,270 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(832): About to register with Master. 
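The interleaved master and RS:0 threads above all belong to a single-process mini cluster started by the test harness. A minimal, self-contained sketch of starting such a cluster with HBaseTestingUtility (the class name used by HBase 2.x test code; newer branches also ship an HBaseTestingUtil variant, and the table created here is purely illustrative):

    import org.apache.hadoop.hbase.HBaseTestingUtility;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtility util = new HBaseTestingUtility();
        util.startMiniCluster();   // brings up HDFS, ZooKeeper, a master and one region server
        try {
          Table table = util.createTable(TableName.valueOf("demo_table"), Bytes.toBytes("cf"));
          System.out.println("created " + table.getName());
        } finally {
          util.shutdownMiniCluster();   // tears the whole mini cluster down again
        }
      }
    }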
2024-11-24T02:51:07,273 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,33511,1732416663011 with port=43175, startcode=1732416664278 2024-11-24T02:51:07,285 DEBUG [RS:0;7c69a60bd8f6:43175 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:51:07,346 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:55223, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.0 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:51:07,353 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33511 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,355 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33511 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,370 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f 2024-11-24T02:51:07,370 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:44189 2024-11-24T02:51:07,370 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:51:07,394 WARN [7c69a60bd8f6:33511 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T02:51:07,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:51:07,404 DEBUG [RS:0;7c69a60bd8f6:43175 {}] zookeeper.ZKUtil(111): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,405 WARN [RS:0;7c69a60bd8f6:43175 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
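The ZKUtil and ZKWatcher lines above show the region server setting a watcher on its own /hbase/rs znode and both processes receiving NodeCreated/NodeChildrenChanged events under /hbase. A stand-alone sketch of the same pattern with the plain ZooKeeper client (not HBase's ZKWatcher; the quorum address is an assumption, the test above happens to use 127.0.0.1:49774):

    import java.util.concurrent.CountDownLatch;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class ZnodeWatchSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
          // NodeCreated / NodeChildrenChanged events arrive here, much like the ZKWatcher lines above.
          System.out.println("event type=" + event.getType() + " path=" + event.getPath());
          if (event.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
        });
        connected.await();
        // exists() with watch=true registers a one-shot watcher, similar to what ZKUtil does for /hbase/rs.
        Stat stat = zk.exists("/hbase/rs", true);
        System.out.println(stat == null ? "znode absent (not necessarily an error)" : "znode present");
        zk.close();
      }
    }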
2024-11-24T02:51:07,405 INFO [RS:0;7c69a60bd8f6:43175 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:51:07,405 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,408 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,43175,1732416664278] 2024-11-24T02:51:07,430 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:51:07,449 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:51:07,454 INFO [RS:0;7c69a60bd8f6:43175 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:51:07,454 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,455 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:51:07,461 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:51:07,462 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,462 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,462 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,462 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,463 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,464 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:51:07,464 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:51:07,464 DEBUG [RS:0;7c69a60bd8f6:43175 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:51:07,465 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,465 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,465 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,465 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,465 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,466 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,43175,1732416664278-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:51:07,490 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:51:07,492 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,43175,1732416664278-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,492 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:07,492 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.Replication(171): 7c69a60bd8f6,43175,1732416664278 started 2024-11-24T02:51:07,517 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
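Each "Starting executor service" line above names a pool with a corePoolSize and maxPoolSize (RS_OPEN_REGION 1/1, RS_LOG_REPLAY_OPS 2/2, RS_SNAPSHOT_OPERATIONS 3/3, and so on). These are HBase-internal executors, but the core/max semantics are the standard ThreadPoolExecutor ones; a plain-JDK sketch of a 1-core/1-max pool in the style of RS_OPEN_REGION:

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        // corePoolSize=1, maximumPoolSize=1: one worker thread, additional tasks queue up.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            1, 1, 60L, TimeUnit.SECONDS, new LinkedBlockingQueue<>());
        pool.allowCoreThreadTimeOut(true); // mirrors the allowCoreThreadTimeOut=true noted earlier in the log
        pool.submit(() -> System.out.println("open-region-style task running"));
        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }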
2024-11-24T02:51:07,518 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,43175,1732416664278, RpcServer on 7c69a60bd8f6/172.17.0.2:43175, sessionid=0x1016ac0666c0001 2024-11-24T02:51:07,518 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:51:07,519 DEBUG [RS:0;7c69a60bd8f6:43175 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,519 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,43175,1732416664278' 2024-11-24T02:51:07,519 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:51:07,520 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:51:07,521 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:51:07,521 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:51:07,521 DEBUG [RS:0;7c69a60bd8f6:43175 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,521 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,43175,1732416664278' 2024-11-24T02:51:07,522 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:51:07,523 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:51:07,524 DEBUG [RS:0;7c69a60bd8f6:43175 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:51:07,524 INFO [RS:0;7c69a60bd8f6:43175 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:51:07,525 INFO [RS:0;7c69a60bd8f6:43175 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
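The flush-table-proc and online-snapshot members registered above are the region-server side of the ZooKeeper-coordinated procedure framework that table flush and snapshot requests can run through. A minimal client-side sketch that issues a table flush (connection settings come from hbase-site.xml on the classpath and the table name is hypothetical; depending on version this request may or may not route through the flush-table-proc members shown above):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Requests a flush of all regions of the (hypothetical) table.
          admin.flush(TableName.valueOf("demo_table"));
        }
      }
    }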
2024-11-24T02:51:07,632 INFO [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C43175%2C1732416664278, suffix=, logDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278, archiveDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs, maxLogs=32 2024-11-24T02:51:07,636 INFO [RS:0;7c69a60bd8f6:43175 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416667635 2024-11-24T02:51:07,648 INFO [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416667635 2024-11-24T02:51:07,651 DEBUG [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36079:36079),(127.0.0.1/127.0.0.1:35599:35599)] 2024-11-24T02:51:07,897 DEBUG [7c69a60bd8f6:33511 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:51:07,911 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:07,920 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,43175,1732416664278, state=OPENING 2024-11-24T02:51:08,028 DEBUG [PEWorker-5 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:51:08,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:08,093 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:51:08,094 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:51:08,094 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:51:08,096 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:51:08,098 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,43175,1732416664278}] 2024-11-24T02:51:08,276 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:51:08,280 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:32809, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:51:08,294 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:51:08,294 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:51:08,298 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C43175%2C1732416664278.meta, suffix=.meta, logDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278, archiveDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs, maxLogs=32 2024-11-24T02:51:08,301 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.meta.1732416668301.meta 2024-11-24T02:51:08,310 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.meta.1732416668301.meta 2024-11-24T02:51:08,312 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35599:35599),(127.0.0.1/127.0.0.1:36079:36079)] 2024-11-24T02:51:08,313 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:51:08,314 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:51:08,317 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:51:08,322 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T02:51:08,327 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:51:08,328 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:51:08,328 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:51:08,328 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:51:08,332 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:51:08,334 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:51:08,334 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:08,335 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:08,336 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:51:08,338 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:51:08,338 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:08,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:08,339 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:51:08,341 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:51:08,341 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:08,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:51:08,343 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:51:08,345 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:51:08,345 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:08,346 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T02:51:08,348 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:51:08,350 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740 2024-11-24T02:51:08,353 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740 2024-11-24T02:51:08,356 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:51:08,356 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:51:08,358 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:51:08,361 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:51:08,362 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=807947, jitterRate=0.027358368039131165}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:51:08,363 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:51:08,365 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416668329Writing region info on filesystem at 1732416668329Initializing all the Stores at 1732416668331 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416668331Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416668332 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416668332Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416668332Cleaning up temporary data from old regions at 1732416668357 (+25 ms)Running coprocessor post-open hooks at 1732416668363 (+6 ms)Region opened successfully at 1732416668364 (+1 ms) 2024-11-24T02:51:08,373 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416668269 2024-11-24T02:51:08,384 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:51:08,385 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:51:08,386 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:08,389 INFO [PEWorker-4 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,43175,1732416664278, state=OPEN 2024-11-24T02:51:08,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:51:08,538 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:51:08,538 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:51:08,538 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:51:08,538 DEBUG [PEWorker-4 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:08,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:51:08,544 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,43175,1732416664278 in 440 msec 2024-11-24T02:51:08,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:51:08,551 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 1.3120 sec 2024-11-24T02:51:08,553 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:51:08,553 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:51:08,572 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:51:08,574 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,43175,1732416664278, seqNum=-1] 2024-11-24T02:51:08,595 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:51:08,597 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33271, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:51:08,624 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 1.8470 sec 2024-11-24T02:51:08,624 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416668624, completionTime=-1 2024-11-24T02:51:08,627 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:51:08,627 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:51:08,651 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:51:08,651 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416728651 2024-11-24T02:51:08,651 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732416788651 2024-11-24T02:51:08,652 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 24 msec 2024-11-24T02:51:08,654 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33511,1732416663011-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:08,655 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33511,1732416663011-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:08,655 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33511,1732416663011-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:08,656 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:33511, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:51:08,656 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:08,657 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:51:08,663 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:51:08,684 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 3.302sec 2024-11-24T02:51:08,685 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:51:08,686 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:51:08,687 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:51:08,688 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T02:51:08,688 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:51:08,689 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33511,1732416663011-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:51:08,689 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33511,1732416663011-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:51:08,697 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:51:08,698 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:51:08,698 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33511,1732416663011-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:51:08,745 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66cb686a, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:51:08,747 DEBUG [Time-limited test {}] nio.NioEventLoop(110): -Dio.netty.noKeySetOptimization: false 2024-11-24T02:51:08,747 DEBUG [Time-limited test {}] nio.NioEventLoop(111): -Dio.netty.selectorAutoRebuildThreshold: 512 2024-11-24T02:51:08,750 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,33511,-1 for getting cluster id 2024-11-24T02:51:08,754 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:51:08,762 DEBUG [HMaster-EventLoopGroup-1-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '7210ffa8-c00e-4d65-a308-603738a634e7' 2024-11-24T02:51:08,765 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:51:08,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "7210ffa8-c00e-4d65-a308-603738a634e7" 2024-11-24T02:51:08,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@390801e4, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:51:08,766 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,33511,-1] 2024-11-24T02:51:08,769 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:51:08,771 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:51:08,773 INFO [HMaster-EventLoopGroup-1-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42660, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:51:08,776 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@32fb6022, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:51:08,777 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:51:08,785 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,43175,1732416664278, seqNum=-1] 2024-11-24T02:51:08,785 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:51:08,789 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-3-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:35878, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:51:08,814 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): 
Minicluster is up; activeMaster=7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:08,814 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:51:08,823 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:51:08,828 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T02:51:08,833 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncConnectionImpl(321): The fetched master address is 7c69a60bd8f6,33511,1732416663011 2024-11-24T02:51:08,835 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@5434eac4 2024-11-24T02:51:08,836 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T02:51:08,839 INFO [HMaster-EventLoopGroup-1-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42670, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T02:51:08,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T02:51:08,841 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T02:51:08,845 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testSlowSyncLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:51:08,854 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling 2024-11-24T02:51:08,857 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T02:51:08,860 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testSlowSyncLogRolling" procId is: 4 2024-11-24T02:51:08,860 DEBUG [PEWorker-5 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:08,863 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T02:51:08,865 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:51:08,916 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741835_1011 (size=389) 2024-11-24T02:51:08,917 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741835_1011 (size=389) 2024-11-24T02:51:08,921 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 95dc1c479890a1fb9c58ac03a9c1ca2f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testSlowSyncLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f 2024-11-24T02:51:08,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741836_1012 (size=72) 2024-11-24T02:51:08,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741836_1012 (size=72) 2024-11-24T02:51:08,935 DEBUG 
[RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:51:08,935 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 95dc1c479890a1fb9c58ac03a9c1ca2f, disabling compactions & flushes 2024-11-24T02:51:08,935 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:51:08,935 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:51:08,935 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. after waiting 0 ms 2024-11-24T02:51:08,935 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:51:08,936 INFO [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:51:08,936 DEBUG [RegionOpenAndInit-TestLogRolling-testSlowSyncLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: Waiting for close lock at 1732416668935Disabling compacts and flushes for region at 1732416668935Disabling writes for close at 1732416668935Writing region close event to WAL at 1732416668935Closed at 1732416668935 2024-11-24T02:51:08,938 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T02:51:08,943 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.","families":{"info":[{"qualifier":"regioninfo","vlen":71,"tag":[],"timestamp":"1732416668938"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416668938"}]},"ts":"1732416668938"} 2024-11-24T02:51:08,948 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T02:51:08,951 INFO [PEWorker-5 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T02:51:08,953 DEBUG [PEWorker-5 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416668951"}]},"ts":"1732416668951"} 2024-11-24T02:51:08,958 INFO [PEWorker-5 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLING in hbase:meta 2024-11-24T02:51:08,960 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=95dc1c479890a1fb9c58ac03a9c1ca2f, ASSIGN}] 2024-11-24T02:51:08,963 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=95dc1c479890a1fb9c58ac03a9c1ca2f, ASSIGN 2024-11-24T02:51:08,965 INFO [PEWorker-3 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=95dc1c479890a1fb9c58ac03a9c1ca2f, ASSIGN; state=OFFLINE, location=7c69a60bd8f6,43175,1732416664278; forceNewPlan=false, retain=false 2024-11-24T02:51:09,117 INFO [PEWorker-4 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=95dc1c479890a1fb9c58ac03a9c1ca2f, regionState=OPENING, regionLocation=7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:09,122 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=95dc1c479890a1fb9c58ac03a9c1ca2f, ASSIGN because future has completed 2024-11-24T02:51:09,123 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 95dc1c479890a1fb9c58ac03a9c1ca2f, server=7c69a60bd8f6,43175,1732416664278}] 2024-11-24T02:51:09,285 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 
2024-11-24T02:51:09,285 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 95dc1c479890a1fb9c58ac03a9c1ca2f, NAME => 'TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:51:09,286 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testSlowSyncLogRolling 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,286 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:51:09,286 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,286 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,289 INFO [StoreOpener-95dc1c479890a1fb9c58ac03a9c1ca2f-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,291 INFO [StoreOpener-95dc1c479890a1fb9c58ac03a9c1ca2f-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 95dc1c479890a1fb9c58ac03a9c1ca2f columnFamilyName info 2024-11-24T02:51:09,291 DEBUG [StoreOpener-95dc1c479890a1fb9c58ac03a9c1ca2f-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:51:09,292 INFO [StoreOpener-95dc1c479890a1fb9c58ac03a9c1ca2f-1 {}] regionserver.HStore(327): Store=95dc1c479890a1fb9c58ac03a9c1ca2f/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:51:09,293 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,294 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,295 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,296 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,296 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,299 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,303 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:51:09,305 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 95dc1c479890a1fb9c58ac03a9c1ca2f; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=813312, jitterRate=0.03418053686618805}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:51:09,305 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:09,306 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: Running coprocessor pre-open hook at 1732416669286Writing region info on filesystem at 1732416669286Initializing all the Stores at 1732416669288 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416669288Cleaning up temporary data from old regions at 1732416669296 (+8 ms)Running coprocessor post-open hooks at 1732416669305 (+9 ms)Region opened successfully at 1732416669306 (+1 ms) 2024-11-24T02:51:09,309 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f., pid=6, masterSystemTime=1732416669278 2024-11-24T02:51:09,313 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:51:09,313 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:51:09,314 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=95dc1c479890a1fb9c58ac03a9c1ca2f, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,43175,1732416664278 2024-11-24T02:51:09,319 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-3-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 95dc1c479890a1fb9c58ac03a9c1ca2f, server=7c69a60bd8f6,43175,1732416664278 because future has completed 2024-11-24T02:51:09,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T02:51:09,325 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 95dc1c479890a1fb9c58ac03a9c1ca2f, server=7c69a60bd8f6,43175,1732416664278 in 198 msec 2024-11-24T02:51:09,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T02:51:09,330 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testSlowSyncLogRolling, region=95dc1c479890a1fb9c58ac03a9c1ca2f, ASSIGN in 365 msec 2024-11-24T02:51:09,332 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T02:51:09,332 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testSlowSyncLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416669332"}]},"ts":"1732416669332"} 2024-11-24T02:51:09,337 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testSlowSyncLogRolling, state=ENABLED in hbase:meta 2024-11-24T02:51:09,339 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T02:51:09,344 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testSlowSyncLogRolling in 491 msec 2024-11-24T02:51:13,836 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-hbase.properties,hadoop-metrics2.properties 2024-11-24T02:51:13,890 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:51:13,890 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T02:51:13,893 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: 
RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T02:51:13,893 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T02:51:13,895 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:51:13,895 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T02:51:13,895 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:51:13,895 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T02:51:13,898 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T02:51:13,901 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testSlowSyncLogRolling' 2024-11-24T02:51:18,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33511 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:51:18,913 INFO [RPCClient-NioEventLoopGroup-4-4 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testSlowSyncLogRolling completed 2024-11-24T02:51:18,916 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testSlowSyncLogRolling,, stopping at row=TestLogRolling-testSlowSyncLogRolling ,, for max=2147483647 with caching=100 2024-11-24T02:51:18,923 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testSlowSyncLogRolling 2024-11-24T02:51:18,924 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 
2024-11-24T02:51:18,925 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416678925 2024-11-24T02:51:18,933 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:18,933 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:18,934 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:18,934 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:18,934 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:18,934 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416667635 with entries=1, filesize=443 B; new WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416678925 2024-11-24T02:51:18,936 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35599:35599),(127.0.0.1/127.0.0.1:36079:36079)] 2024-11-24T02:51:18,936 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416667635 is not closed yet, will try archiving it next time 2024-11-24T02:51:18,938 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741833_1009 (size=451) 2024-11-24T02:51:18,940 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741833_1009 (size=451) 2024-11-24T02:51:18,941 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416667635 to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs/7c69a60bd8f6%2C43175%2C1732416664278.1732416667635 2024-11-24T02:51:18,947 DEBUG [RPCClient-NioEventLoopGroup-4-3 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testSlowSyncLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f., hostname=7c69a60bd8f6,43175,1732416664278, seqNum=2] 2024-11-24T02:51:30,989 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43175 {}] regionserver.HRegion(8855): Flush requested on 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:30,993 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:51:31,062 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/c223e55b1f984443a5d747e6fe2655f2 is 1080, key is row0001/info:/1732416678950/Put/seqid=0 2024-11-24T02:51:31,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741838_1014 (size=12509) 2024-11-24T02:51:31,082 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741838_1014 (size=12509) 2024-11-24T02:51:31,085 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/c223e55b1f984443a5d747e6fe2655f2 2024-11-24T02:51:31,151 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/c223e55b1f984443a5d747e6fe2655f2 as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2 2024-11-24T02:51:31,161 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2, entries=7, sequenceid=11, filesize=12.2 K 2024-11-24T02:51:31,167 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 95dc1c479890a1fb9c58ac03a9c1ca2f in 174ms, sequenceid=11, compaction requested=false 2024-11-24T02:51:31,168 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: 2024-11-24T02:51:32,151 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 
2024-11-24T02:51:39,006 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416699006 2024-11-24T02:51:39,214 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 205 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:51:39,214 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:39,214 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:39,214 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:39,214 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:39,215 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:39,215 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416678925 with entries=12, filesize=12.10 KB; new WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416699006 2024-11-24T02:51:39,216 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36079:36079),(127.0.0.1/127.0.0.1:35599:35599)] 2024-11-24T02:51:39,216 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416678925 is not closed yet, will try archiving it next time 2024-11-24T02:51:39,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741837_1013 (size=12399) 2024-11-24T02:51:39,217 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741837_1013 (size=12399) 2024-11-24T02:51:39,419 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:41,623 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:43,827 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:46,035 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow 
sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:46,036 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43175 {}] regionserver.HRegion(8855): Flush requested on 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:51:46,036 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:51:46,241 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:46,255 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/5db5e9fd44e049b1a9cb344a6e51fe9a is 1080, key is row0008/info:/1732416692994/Put/seqid=0 2024-11-24T02:51:46,262 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741840_1016 (size=12509) 2024-11-24T02:51:46,263 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741840_1016 (size=12509) 2024-11-24T02:51:46,264 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/5db5e9fd44e049b1a9cb344a6e51fe9a 2024-11-24T02:51:46,275 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/5db5e9fd44e049b1a9cb344a6e51fe9a as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/5db5e9fd44e049b1a9cb344a6e51fe9a 2024-11-24T02:51:46,286 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/5db5e9fd44e049b1a9cb344a6e51fe9a, entries=7, sequenceid=21, filesize=12.2 K 2024-11-24T02:51:46,489 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:46,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 95dc1c479890a1fb9c58ac03a9c1ca2f in 
453ms, sequenceid=21, compaction requested=false 2024-11-24T02:51:46,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: 2024-11-24T02:51:46,492 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=24.4 K, sizeToCheck=16.0 K 2024-11-24T02:51:46,492 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:51:46,494 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2 because midkey is the same as first or last row 2024-11-24T02:51:48,242 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:49,516 INFO [master/7c69a60bd8f6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T02:51:49,516 INFO [master/7c69a60bd8f6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T02:51:50,447 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:50,452 WARN [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(2201): Requesting log roll because we exceeded slow sync threshold; count=8, threshold=5, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:50,453 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C43175%2C1732416664278:(num 1732416699006) roll requested 2024-11-24T02:51:50,454 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416710454 2024-11-24T02:51:50,666 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 210 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK], DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK]] 2024-11-24T02:51:50,667 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:50,667 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:50,667 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:50,667 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:51:50,667 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 
2024-11-24T02:51:50,668 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416699006 with entries=8, filesize=7.55 KB; new WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416710454 2024-11-24T02:51:50,669 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35599:35599),(127.0.0.1/127.0.0.1:36079:36079)] 2024-11-24T02:51:50,669 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416699006 is not closed yet, will try archiving it next time 2024-11-24T02:51:50,669 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416678925 to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs/7c69a60bd8f6%2C43175%2C1732416664278.1732416678925 2024-11-24T02:51:50,671 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741839_1015 (size=7739) 2024-11-24T02:51:50,672 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741839_1015 (size=7739) 2024-11-24T02:51:52,655 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 201 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:51:54,286 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 95dc1c479890a1fb9c58ac03a9c1ca2f, had cached 0 bytes from a total of 25018 2024-11-24T02:51:54,861 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:51:57,068 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 202 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:51:59,278 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 203 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], 
DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:01,281 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T02:52:01,281 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416721281 2024-11-24T02:52:02,152 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T02:52:06,295 INFO [Time-limited test {}] wal.AbstractFSWAL(1368): Slow sync cost: 5011 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:06,298 WARN [Time-limited test {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5011 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:06,298 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C43175%2C1732416664278:(num 1732416721281) roll requested 2024-11-24T02:52:06,298 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:06,298 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:06,298 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:06,299 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:06,299 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:06,299 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416710454 with entries=4, filesize=4.63 KB; new WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416721281 2024-11-24T02:52:06,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35599:35599),(127.0.0.1/127.0.0.1:36079:36079)] 2024-11-24T02:52:06,300 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416710454 is not closed yet, will try archiving it next time 2024-11-24T02:52:06,301 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416726301 2024-11-24T02:52:06,302 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741841_1017 (size=4753) 2024-11-24T02:52:06,303 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741841_1017 (size=4753) 2024-11-24T02:52:11,304 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5000 ms, current pipeline: 
[DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:11,304 WARN [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5000 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:11,304 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43175 {}] regionserver.HRegion(8855): Flush requested on 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:52:11,304 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:52:11,312 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1368): Slow sync cost: 5005 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:11,312 WARN [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5005 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:13,305 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T02:52:16,306 INFO [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1368): Slow sync cost: 5001 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:16,307 WARN [FSHLog-0-hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f-prefix:7c69a60bd8f6,43175,1732416664278 {}] wal.AbstractFSWAL(1374): Requesting log roll because we exceeded slow sync threshold; time=5001 ms, threshold=5000 ms, current pipeline: [DatanodeInfoWithStorage[127.0.0.1:36823,DS-56040c61-e91e-408b-8a45-7972f0254e98,DISK], DatanodeInfoWithStorage[127.0.0.1:38253,DS-3282ae6b-3ca8-4861-a556-a19d3b89b2d6,DISK]] 2024-11-24T02:52:16,307 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,307 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,307 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,307 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,307 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,308 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416721281 with entries=2, filesize=1.52 KB; new WAL 
/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416726301 2024-11-24T02:52:16,309 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36079:36079),(127.0.0.1/127.0.0.1:35599:35599)] 2024-11-24T02:52:16,310 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416721281 is not closed yet, will try archiving it next time 2024-11-24T02:52:16,310 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C43175%2C1732416664278:(num 1732416726301) roll requested 2024-11-24T02:52:16,310 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741842_1018 (size=1569) 2024-11-24T02:52:16,310 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416736310 2024-11-24T02:52:16,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741842_1018 (size=1569) 2024-11-24T02:52:16,312 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/d93dba7a68c744d1ade0d3414bca21fb is 1080, key is row0015/info:/1732416708040/Put/seqid=0 2024-11-24T02:52:16,323 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,323 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,323 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,323 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741844_1020 (size=12509) 2024-11-24T02:52:16,323 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,323 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741844_1020 (size=12509) 2024-11-24T02:52:16,323 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416726301 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416736310 2024-11-24T02:52:16,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741843_1019 (size=93) 2024-11-24T02:52:16,325 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741843_1019 (size=93) 2024-11-24T02:52:16,326 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416726301 to 
hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs/7c69a60bd8f6%2C43175%2C1732416664278.1732416726301 2024-11-24T02:52:16,329 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36079:36079),(127.0.0.1/127.0.0.1:35599:35599)] 2024-11-24T02:52:16,329 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C43175%2C1732416664278.1732416736329 2024-11-24T02:52:16,339 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,339 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,340 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,340 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,340 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:16,340 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416736310 with entries=1, filesize=1.22 KB; new WAL /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/WALs/7c69a60bd8f6,43175,1732416664278/7c69a60bd8f6%2C43175%2C1732416664278.1732416736329 2024-11-24T02:52:16,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741845_1021 (size=1258) 2024-11-24T02:52:16,342 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741845_1021 (size=1258) 2024-11-24T02:52:16,346 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35599:35599),(127.0.0.1/127.0.0.1:36079:36079)] 2024-11-24T02:52:16,727 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=31 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/d93dba7a68c744d1ade0d3414bca21fb 2024-11-24T02:52:16,743 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/d93dba7a68c744d1ade0d3414bca21fb as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/d93dba7a68c744d1ade0d3414bca21fb 2024-11-24T02:52:16,752 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/d93dba7a68c744d1ade0d3414bca21fb, entries=7, sequenceid=31, filesize=12.2 K 2024-11-24T02:52:16,754 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=1.05 KB/1076 for 95dc1c479890a1fb9c58ac03a9c1ca2f in 5450ms, sequenceid=31, compaction requested=true 2024-11-24T02:52:16,754 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 
95dc1c479890a1fb9c58ac03a9c1ca2f: 2024-11-24T02:52:16,754 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=36.6 K, sizeToCheck=16.0 K 2024-11-24T02:52:16,754 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:52:16,754 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2 because midkey is the same as first or last row 2024-11-24T02:52:16,756 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 95dc1c479890a1fb9c58ac03a9c1ca2f:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:52:16,757 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:52:16,757 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:52:16,760 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 37527 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:52:16,761 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.HStore(1541): 95dc1c479890a1fb9c58ac03a9c1ca2f/info is initiating minor compaction (all files) 2024-11-24T02:52:16,762 INFO [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 95dc1c479890a1fb9c58ac03a9c1ca2f/info in TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 
2024-11-24T02:52:16,762 INFO [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/5db5e9fd44e049b1a9cb344a6e51fe9a, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/d93dba7a68c744d1ade0d3414bca21fb] into tmpdir=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp, totalSize=36.6 K 2024-11-24T02:52:16,763 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] compactions.Compactor(225): Compacting c223e55b1f984443a5d747e6fe2655f2, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732416678950 2024-11-24T02:52:16,764 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5db5e9fd44e049b1a9cb344a6e51fe9a, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=21, earliestPutTs=1732416692994 2024-11-24T02:52:16,765 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] compactions.Compactor(225): Compacting d93dba7a68c744d1ade0d3414bca21fb, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=31, earliestPutTs=1732416708040 2024-11-24T02:52:16,790 INFO [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 95dc1c479890a1fb9c58ac03a9c1ca2f#info#compaction#3 average throughput is 10.77 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:52:16,791 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/0d19ffd2154549669fd94af2bd7c6c38 is 1080, key is row0001/info:/1732416678950/Put/seqid=0 2024-11-24T02:52:16,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741847_1023 (size=27710) 2024-11-24T02:52:16,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741847_1023 (size=27710) 2024-11-24T02:52:16,807 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/0d19ffd2154549669fd94af2bd7c6c38 as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/0d19ffd2154549669fd94af2bd7c6c38 2024-11-24T02:52:16,823 INFO [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 95dc1c479890a1fb9c58ac03a9c1ca2f/info of 95dc1c479890a1fb9c58ac03a9c1ca2f into 0d19ffd2154549669fd94af2bd7c6c38(size=27.1 K), total size for store is 27.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:52:16,823 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: 2024-11-24T02:52:16,825 INFO [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f., storeName=95dc1c479890a1fb9c58ac03a9c1ca2f/info, priority=13, startTime=1732416736755; duration=0sec 2024-11-24T02:52:16,826 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T02:52:16,826 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:52:16,826 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/0d19ffd2154549669fd94af2bd7c6c38 because midkey is the same as first or last row 2024-11-24T02:52:16,826 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T02:52:16,826 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:52:16,826 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/0d19ffd2154549669fd94af2bd7c6c38 because midkey is the same as first or last row 2024-11-24T02:52:16,827 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=27.1 K, sizeToCheck=16.0 K 2024-11-24T02:52:16,827 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:52:16,827 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/0d19ffd2154549669fd94af2bd7c6c38 because midkey is the same as first or last row 2024-11-24T02:52:16,827 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:52:16,827 DEBUG [RS:0;7c69a60bd8f6:43175-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 95dc1c479890a1fb9c58ac03a9c1ca2f:info 2024-11-24T02:52:28,362 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=43175 {}] regionserver.HRegion(8855): Flush requested on 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:52:28,363 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:52:28,369 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/ee720471a8e241edb9a9792e8b1beacc is 1080, key is row0022/info:/1732416736331/Put/seqid=0 2024-11-24T02:52:28,378 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741848_1024 (size=12509) 2024-11-24T02:52:28,379 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741848_1024 (size=12509) 2024-11-24T02:52:28,379 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=42 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/ee720471a8e241edb9a9792e8b1beacc 2024-11-24T02:52:28,389 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/ee720471a8e241edb9a9792e8b1beacc as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/ee720471a8e241edb9a9792e8b1beacc 2024-11-24T02:52:28,396 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/ee720471a8e241edb9a9792e8b1beacc, entries=7, sequenceid=42, filesize=12.2 K 2024-11-24T02:52:28,397 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=0 B/0 for 95dc1c479890a1fb9c58ac03a9c1ca2f in 35ms, sequenceid=42, compaction requested=false 2024-11-24T02:52:28,398 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: 2024-11-24T02:52:28,398 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=39.3 K, sizeToCheck=16.0 K 2024-11-24T02:52:28,398 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:52:28,398 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/0d19ffd2154549669fd94af2bd7c6c38 because midkey is the same as first or last row 2024-11-24T02:52:32,152 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T02:52:36,380 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:52:36,381 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T02:52:36,382 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:52:36,392 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:36,393 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:36,393 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T02:52:36,393 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:52:36,393 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=698085998, stopped=false 2024-11-24T02:52:36,393 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,33511,1732416663011 2024-11-24T02:52:36,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:36,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:36,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:36,404 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:36,404 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:52:36,405 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T02:52:36,405 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:52:36,405 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:36,405 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:36,405 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:36,405 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,43175,1732416664278' ***** 2024-11-24T02:52:36,405 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:52:36,406 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:52:36,406 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:52:36,406 INFO [RS:0;7c69a60bd8f6:43175 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:52:36,406 INFO [RS:0;7c69a60bd8f6:43175 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:52:36,406 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(3091): Received CLOSE for 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,43175,1732416664278 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:43175. 
2024-11-24T02:52:36,407 DEBUG [RS:0;7c69a60bd8f6:43175 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:52:36,407 DEBUG [RS:0;7c69a60bd8f6:43175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:52:36,407 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 95dc1c479890a1fb9c58ac03a9c1ca2f, disabling compactions & flushes 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:52:36,407 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:52:36,407 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:52:36,407 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:52:36,407 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. after waiting 0 ms 2024-11-24T02:52:36,407 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 
2024-11-24T02:52:36,408 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T02:52:36,408 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f 1/1 column families, dataSize=3.15 KB heapSize=3.63 KB 2024-11-24T02:52:36,408 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1325): Online Regions={95dc1c479890a1fb9c58ac03a9c1ca2f=TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T02:52:36,408 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:52:36,408 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:52:36,408 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:52:36,408 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:52:36,408 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:52:36,408 DEBUG [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 95dc1c479890a1fb9c58ac03a9c1ca2f 2024-11-24T02:52:36,408 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.65 KB heapSize=3.67 KB 2024-11-24T02:52:36,413 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/488b9eb563f9471b9e0139437944e293 is 1080, key is row0029/info:/1732416750367/Put/seqid=0 2024-11-24T02:52:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741849_1025 (size=8193) 2024-11-24T02:52:36,421 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741849_1025 (size=8193) 2024-11-24T02:52:36,422 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=3.15 KB at sequenceid=48 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/488b9eb563f9471b9e0139437944e293 2024-11-24T02:52:36,430 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/info/3789f418b8ff4119bc1d80eef7996aca is 195, key is 
TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f./info:regioninfo/1732416669314/Put/seqid=0 2024-11-24T02:52:36,431 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/.tmp/info/488b9eb563f9471b9e0139437944e293 as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/488b9eb563f9471b9e0139437944e293 2024-11-24T02:52:36,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741850_1026 (size=7016) 2024-11-24T02:52:36,438 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741850_1026 (size=7016) 2024-11-24T02:52:36,439 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.45 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/info/3789f418b8ff4119bc1d80eef7996aca 2024-11-24T02:52:36,442 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/488b9eb563f9471b9e0139437944e293, entries=3, sequenceid=48, filesize=8.0 K 2024-11-24T02:52:36,444 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 95dc1c479890a1fb9c58ac03a9c1ca2f in 36ms, sequenceid=48, compaction requested=true 2024-11-24T02:52:36,444 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/5db5e9fd44e049b1a9cb344a6e51fe9a, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/d93dba7a68c744d1ade0d3414bca21fb] to archive 2024-11-24T02:52:36,447 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T02:52:36,451 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2 to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/c223e55b1f984443a5d747e6fe2655f2 2024-11-24T02:52:36,453 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/5db5e9fd44e049b1a9cb344a6e51fe9a to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/5db5e9fd44e049b1a9cb344a6e51fe9a 2024-11-24T02:52:36,455 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/d93dba7a68c744d1ade0d3414bca21fb to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/archive/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/info/d93dba7a68c744d1ade0d3414bca21fb 2024-11-24T02:52:36,463 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/ns/bc012c1808bd4362a3d586e8d2a0c4df is 43, key is default/ns:d/1732416668601/Put/seqid=0 2024-11-24T02:52:36,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741851_1027 (size=5153) 2024-11-24T02:52:36,473 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741851_1027 (size=5153) 2024-11-24T02:52:36,473 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/ns/bc012c1808bd4362a3d586e8d2a0c4df 2024-11-24T02:52:36,472 DEBUG [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7c69a60bd8f6:33511 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T02:52:36,478 WARN [StoreCloser-TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [c223e55b1f984443a5d747e6fe2655f2=12509, 5db5e9fd44e049b1a9cb344a6e51fe9a=12509, d93dba7a68c744d1ade0d3414bca21fb=12509] 2024-11-24T02:52:36,482 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T02:52:36,482 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T02:52:36,486 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/default/TestLogRolling-testSlowSyncLogRolling/95dc1c479890a1fb9c58ac03a9c1ca2f/recovered.edits/51.seqid, newMaxSeqId=51, maxSeqId=1 2024-11-24T02:52:36,489 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 2024-11-24T02:52:36,489 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 95dc1c479890a1fb9c58ac03a9c1ca2f: Waiting for close lock at 1732416756407Running coprocessor pre-close hooks at 1732416756407Disabling compacts and flushes for region at 1732416756407Disabling writes for close at 1732416756407Obtaining lock to block concurrent updates at 1732416756408 (+1 ms)Preparing flush snapshotting stores in 95dc1c479890a1fb9c58ac03a9c1ca2f at 1732416756408Finished memstore snapshotting TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f., syncing WAL and waiting on mvcc, flushsize=dataSize=3228, getHeapSize=3696, getOffHeapSize=0, getCellsCount=3 at 1732416756408Flushing stores of TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. at 1732416756409 (+1 ms)Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f/info: creating writer at 1732416756409Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f/info: appending metadata at 1732416756413 (+4 ms)Flushing 95dc1c479890a1fb9c58ac03a9c1ca2f/info: closing flushed file at 1732416756413Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f38bb83: reopening flushed file at 1732416756430 (+17 ms)Finished flush of dataSize ~3.15 KB/3228, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 95dc1c479890a1fb9c58ac03a9c1ca2f in 36ms, sequenceid=48, compaction requested=true at 1732416756444 (+14 ms)Writing region close event to WAL at 1732416756479 (+35 ms)Running coprocessor post-close hooks at 1732416756487 (+8 ms)Closed at 1732416756489 (+2 ms) 2024-11-24T02:52:36,490 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testSlowSyncLogRolling,,1732416668841.95dc1c479890a1fb9c58ac03a9c1ca2f. 
2024-11-24T02:52:36,501 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/table/71d1153fb508476c9683af82b61ce03c is 73, key is TestLogRolling-testSlowSyncLogRolling/table:state/1732416669332/Put/seqid=0 2024-11-24T02:52:36,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741852_1028 (size=5396) 2024-11-24T02:52:36,508 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741852_1028 (size=5396) 2024-11-24T02:52:36,510 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=138 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/table/71d1153fb508476c9683af82b61ce03c 2024-11-24T02:52:36,518 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/info/3789f418b8ff4119bc1d80eef7996aca as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/info/3789f418b8ff4119bc1d80eef7996aca 2024-11-24T02:52:36,527 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/info/3789f418b8ff4119bc1d80eef7996aca, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T02:52:36,529 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/ns/bc012c1808bd4362a3d586e8d2a0c4df as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/ns/bc012c1808bd4362a3d586e8d2a0c4df 2024-11-24T02:52:36,539 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/ns/bc012c1808bd4362a3d586e8d2a0c4df, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T02:52:36,540 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/.tmp/table/71d1153fb508476c9683af82b61ce03c as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/table/71d1153fb508476c9683af82b61ce03c 2024-11-24T02:52:36,548 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/table/71d1153fb508476c9683af82b61ce03c, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T02:52:36,550 INFO 
[RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false 2024-11-24T02:52:36,556 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T02:52:36,557 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:52:36,557 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:52:36,557 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416756408Running coprocessor pre-close hooks at 1732416756408Disabling compacts and flushes for region at 1732416756408Disabling writes for close at 1732416756408Obtaining lock to block concurrent updates at 1732416756408Preparing flush snapshotting stores in 1588230740 at 1732416756408Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1692, getHeapSize=3696, getOffHeapSize=0, getCellsCount=14 at 1732416756408Flushing stores of hbase:meta,,1.1588230740 at 1732416756409 (+1 ms)Flushing 1588230740/info: creating writer at 1732416756410 (+1 ms)Flushing 1588230740/info: appending metadata at 1732416756430 (+20 ms)Flushing 1588230740/info: closing flushed file at 1732416756430Flushing 1588230740/ns: creating writer at 1732416756446 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732416756462 (+16 ms)Flushing 1588230740/ns: closing flushed file at 1732416756462Flushing 1588230740/table: creating writer at 1732416756484 (+22 ms)Flushing 1588230740/table: appending metadata at 1732416756501 (+17 ms)Flushing 1588230740/table: closing flushed file at 1732416756501Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@44635a55: reopening flushed file at 1732416756517 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@79e75d45: reopening flushed file at 1732416756528 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@56cb1e6c: reopening flushed file at 1732416756539 (+11 ms)Finished flush of dataSize ~1.65 KB/1692, heapSize ~3.38 KB/3456, currentSize=0 B/0 for 1588230740 in 142ms, sequenceid=11, compaction requested=false at 1732416756550 (+11 ms)Writing region close event to WAL at 1732416756552 (+2 ms)Running coprocessor post-close hooks at 1732416756557 (+5 ms)Closed at 1732416756557 2024-11-24T02:52:36,558 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:52:36,608 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,43175,1732416664278; all regions closed. 
2024-11-24T02:52:36,610 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,611 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,611 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,611 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,611 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,614 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741834_1010 (size=3066) 2024-11-24T02:52:36,615 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741834_1010 (size=3066) 2024-11-24T02:52:36,621 DEBUG [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs 2024-11-24T02:52:36,621 INFO [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C43175%2C1732416664278.meta:.meta(num 1732416668301) 2024-11-24T02:52:36,622 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,623 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,623 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,623 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,623 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:36,626 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741846_1022 (size=13040) 2024-11-24T02:52:36,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741846_1022 (size=13040) 2024-11-24T02:52:37,038 DEBUG [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(1256): Moved 5 WAL file(s) to /user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/oldWALs 2024-11-24T02:52:37,038 INFO [RS:0;7c69a60bd8f6:43175 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C43175%2C1732416664278:(num 1732416736329) 2024-11-24T02:52:37,038 DEBUG [RS:0;7c69a60bd8f6:43175 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:37,038 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:52:37,038 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:52:37,038 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T02:52:37,038 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:52:37,038 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T02:52:37,039 INFO [RS:0;7c69a60bd8f6:43175 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:43175 2024-11-24T02:52:37,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:52:37,057 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,43175,1732416664278 2024-11-24T02:52:37,057 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:52:37,058 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,43175,1732416664278] 2024-11-24T02:52:37,152 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,43175,1732416664278 already deleted, retry=false 2024-11-24T02:52:37,152 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,43175,1732416664278 expired; onlineServers=0 2024-11-24T02:52:37,153 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,33511,1732416663011' ***** 2024-11-24T02:52:37,153 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:52:37,153 INFO [M:0;7c69a60bd8f6:33511 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:52:37,153 INFO [M:0;7c69a60bd8f6:33511 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:52:37,154 DEBUG [M:0;7c69a60bd8f6:33511 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:52:37,154 DEBUG [M:0;7c69a60bd8f6:33511 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:52:37,154 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T02:52:37,154 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416667168 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416667168,5,FailOnTimeoutGroup] 2024-11-24T02:52:37,154 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416667169 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416667169,5,FailOnTimeoutGroup] 2024-11-24T02:52:37,155 INFO [M:0;7c69a60bd8f6:33511 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:52:37,155 INFO [M:0;7c69a60bd8f6:33511 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:52:37,156 DEBUG [M:0;7c69a60bd8f6:33511 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:52:37,156 INFO [M:0;7c69a60bd8f6:33511 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:52:37,156 INFO [M:0;7c69a60bd8f6:33511 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:52:37,157 INFO [M:0;7c69a60bd8f6:33511 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:52:37,158 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T02:52:37,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:52:37,162 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:37,162 DEBUG [M:0;7c69a60bd8f6:33511 {}] zookeeper.ZKUtil(347): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:52:37,162 WARN [M:0;7c69a60bd8f6:33511 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:52:37,163 INFO [M:0;7c69a60bd8f6:33511 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/.lastflushedseqids 2024-11-24T02:52:37,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:37,168 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:43175-0x1016ac0666c0001, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:37,168 INFO [RS:0;7c69a60bd8f6:43175 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:52:37,169 INFO [RS:0;7c69a60bd8f6:43175 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,43175,1732416664278; zookeeper connection closed. 
2024-11-24T02:52:37,169 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@33921c3b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@33921c3b 2024-11-24T02:52:37,169 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T02:52:37,176 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741853_1029 (size=130) 2024-11-24T02:52:37,177 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741853_1029 (size=130) 2024-11-24T02:52:37,177 INFO [M:0;7c69a60bd8f6:33511 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:52:37,177 INFO [M:0;7c69a60bd8f6:33511 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:52:37,177 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:52:37,177 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:37,178 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:37,178 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:52:37,178 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T02:52:37,178 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.02 KB heapSize=29.20 KB 2024-11-24T02:52:37,201 DEBUG [M:0;7c69a60bd8f6:33511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e2b1cd5218e44561bd139fab3e2ae3c7 is 82, key is hbase:meta,,1/info:regioninfo/1732416668386/Put/seqid=0 2024-11-24T02:52:37,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741854_1030 (size=5672) 2024-11-24T02:52:37,207 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741854_1030 (size=5672) 2024-11-24T02:52:37,208 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e2b1cd5218e44561bd139fab3e2ae3c7 2024-11-24T02:52:37,232 DEBUG [M:0;7c69a60bd8f6:33511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1368ac589abb46a4a18dc8a873f5af3c is 766, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732416669342/Put/seqid=0 2024-11-24T02:52:37,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741855_1031 (size=6247) 2024-11-24T02:52:37,247 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741855_1031 (size=6247) 2024-11-24T02:52:37,248 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.42 KB at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1368ac589abb46a4a18dc8a873f5af3c 2024-11-24T02:52:37,258 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1368ac589abb46a4a18dc8a873f5af3c 2024-11-24T02:52:37,274 DEBUG [M:0;7c69a60bd8f6:33511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9df55c59707c41bdbc5adcb51c2572fa is 69, key is 7c69a60bd8f6,43175,1732416664278/rs:state/1732416667357/Put/seqid=0 2024-11-24T02:52:37,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741856_1032 (size=5156) 2024-11-24T02:52:37,280 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741856_1032 (size=5156) 2024-11-24T02:52:37,281 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=59 (bloomFilter=true), 
to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9df55c59707c41bdbc5adcb51c2572fa 2024-11-24T02:52:37,303 DEBUG [M:0;7c69a60bd8f6:33511 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9562e6ac6dd946c3867b075d56e3e692 is 52, key is load_balancer_on/state:d/1732416668819/Put/seqid=0 2024-11-24T02:52:37,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741857_1033 (size=5056) 2024-11-24T02:52:37,309 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741857_1033 (size=5056) 2024-11-24T02:52:37,310 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=59 (bloomFilter=true), to=hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9562e6ac6dd946c3867b075d56e3e692 2024-11-24T02:52:37,318 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/e2b1cd5218e44561bd139fab3e2ae3c7 as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e2b1cd5218e44561bd139fab3e2ae3c7 2024-11-24T02:52:37,325 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/e2b1cd5218e44561bd139fab3e2ae3c7, entries=8, sequenceid=59, filesize=5.5 K 2024-11-24T02:52:37,327 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/1368ac589abb46a4a18dc8a873f5af3c as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1368ac589abb46a4a18dc8a873f5af3c 2024-11-24T02:52:37,334 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 1368ac589abb46a4a18dc8a873f5af3c 2024-11-24T02:52:37,334 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/1368ac589abb46a4a18dc8a873f5af3c, entries=6, sequenceid=59, filesize=6.1 K 2024-11-24T02:52:37,336 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/9df55c59707c41bdbc5adcb51c2572fa as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9df55c59707c41bdbc5adcb51c2572fa 
2024-11-24T02:52:37,341 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/9df55c59707c41bdbc5adcb51c2572fa, entries=1, sequenceid=59, filesize=5.0 K 2024-11-24T02:52:37,343 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/9562e6ac6dd946c3867b075d56e3e692 as hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9562e6ac6dd946c3867b075d56e3e692 2024-11-24T02:52:37,349 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/9562e6ac6dd946c3867b075d56e3e692, entries=1, sequenceid=59, filesize=4.9 K 2024-11-24T02:52:37,351 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 172ms, sequenceid=59, compaction requested=false 2024-11-24T02:52:37,352 INFO [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:37,353 DEBUG [M:0;7c69a60bd8f6:33511 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416757177Disabling compacts and flushes for region at 1732416757177Disabling writes for close at 1732416757178 (+1 ms)Obtaining lock to block concurrent updates at 1732416757178Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416757178Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23576, getHeapSize=29840, getOffHeapSize=0, getCellsCount=70 at 1732416757178Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732416757180 (+2 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416757180Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416757200 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416757200Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416757214 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416757231 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416757232 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416757259 (+27 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416757273 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416757273Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416757288 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416757303 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416757303Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7d7c3104: reopening flushed file at 1732416757317 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3c227456: reopening flushed file at 1732416757326 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3f8c9c20: reopening flushed file at 1732416757334 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6f258294: reopening flushed file at 1732416757342 (+8 ms)Finished flush of dataSize ~23.02 KB/23576, heapSize ~29.14 KB/29840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 172ms, sequenceid=59, compaction requested=false at 1732416757351 (+9 ms)Writing region close event to WAL at 1732416757352 (+1 ms)Closed at 1732416757352 2024-11-24T02:52:37,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:37,354 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:37,354 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:37,354 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:37,354 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:37,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36823 is added to blk_1073741830_1006 (size=27973) 2024-11-24T02:52:37,357 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:38253 is added to blk_1073741830_1006 (size=27973) 2024-11-24T02:52:37,358 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:52:37,358 INFO [M:0;7c69a60bd8f6:33511 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T02:52:37,358 INFO [M:0;7c69a60bd8f6:33511 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33511 2024-11-24T02:52:37,358 INFO [M:0;7c69a60bd8f6:33511 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:52:37,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:37,470 INFO [M:0;7c69a60bd8f6:33511 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:52:37,470 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33511-0x1016ac0666c0000, quorum=127.0.0.1:49774, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:37,471 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:52:37,474 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1bf97579{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:37,476 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@22b88bcb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:37,476 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:37,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2d48d695{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:37,477 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@11effdcd{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:37,480 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:52:37,480 WARN [BP-2137382314-172.17.0.2-1732416658234 heartbeating to localhost/127.0.0.1:44189 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:52:37,480 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:52:37,480 WARN [BP-2137382314-172.17.0.2-1732416658234 heartbeating to localhost/127.0.0.1:44189 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2137382314-172.17.0.2-1732416658234 (Datanode Uuid 990f560f-2fbc-4359-b95c-6a6f9aa695f4) service to localhost/127.0.0.1:44189 2024-11-24T02:52:37,481 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data3/current/BP-2137382314-172.17.0.2-1732416658234 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:37,482 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data4/current/BP-2137382314-172.17.0.2-1732416658234 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:37,482 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:52:37,488 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b07d1ba{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:37,489 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@43e0a762{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:37,489 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:37,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@371e191c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:37,489 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@28778f0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:37,491 WARN [BP-2137382314-172.17.0.2-1732416658234 heartbeating to localhost/127.0.0.1:44189 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:52:37,491 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:52:37,491 WARN [BP-2137382314-172.17.0.2-1732416658234 heartbeating to localhost/127.0.0.1:44189 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-2137382314-172.17.0.2-1732416658234 (Datanode Uuid c36d2f08-adc1-4f3c-9641-e7de8fcb0d61) service to localhost/127.0.0.1:44189 2024-11-24T02:52:37,491 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:52:37,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data1/current/BP-2137382314-172.17.0.2-1732416658234 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:37,492 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/cluster_dec33c80-770d-19be-0e70-3e98989acf26/data/data2/current/BP-2137382314-172.17.0.2-1732416658234 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:37,492 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:52:37,500 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@735fa16a{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:52:37,501 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6c26a5a3{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:37,501 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:37,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@70be1389{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:37,501 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@ddc8467{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:37,509 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:52:37,539 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:52:37,549 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testSlowSyncLogRolling Thread=77 (was 12) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44189 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RpcClient-timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: region-location-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1170) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
nioEventLoopGroup-5-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-1 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging 
thread: LeaseRenewer:jenkins@localhost:44189 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Monitor thread for TaskMonitor java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.monitoring.TaskMonitor$MonitorRunnable.run(TaskMonitor.java:325) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44189 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SSL Certificates Store Monitor java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.Object.wait(Object.java:338) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:537) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44189 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: GcTimeMonitor obsWindow = 60000, sleepInterval = 5000, maxGcTimePerc = 100 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.util.GcTimeMonitor.run(GcTimeMonitor.java:161) Potentially hanging thread: master/7c69a60bd8f6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: 
RPCClient-NioEventLoopGroup-4-3 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: master/7c69a60bd8f6:0:becomeActiveMaster-MemStoreChunkPool Statistics java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) 
java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44189 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-5-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-3-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Idle-Rpc-Conn-Sweeper-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) 
java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: HMaster-EventLoopGroup-1-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: Time-limited test.named-queue-events-pool-0 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) app//com.lmax.disruptor.BlockingWaitStrategy.waitFor(BlockingWaitStrategy.java:47) app//com.lmax.disruptor.ProcessingSequenceBarrier.waitFor(ProcessingSequenceBarrier.java:56) app//com.lmax.disruptor.BatchEventProcessor.processEvents(BatchEventProcessor.java:159) 
app//com.lmax.disruptor.BatchEventProcessor.run(BatchEventProcessor.java:125) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: SnapshotHandlerChoreCleaner java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HBase-Metrics2-1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.awaitNanos(AbstractQueuedSynchronizer.java:1674) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:1182) java.base@17.0.11/java.util.concurrent.ScheduledThreadPoolExecutor$DelayedWorkQueue.take(ScheduledThreadPoolExecutor.java:899) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.getTask(ThreadPoolExecutor.java:1062) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1122) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-1-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-4-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: SessionTracker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.zookeeper.server.SessionTrackerImpl.run(SessionTrackerImpl.java:163) Potentially hanging thread: sync.3 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-3-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-5-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-2-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-2 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//org.apache.hadoop.fs.FileSystem$Statistics$StatisticsDataReferenceCleaner.run(FileSystem.java:4171) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:44189 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) 
java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-3-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1724) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: regionserver/7c69a60bd8f6:0.procedureResultReporter java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.RemoteProcedureResultReporter.run(RemoteProcedureResultReporter.java:75) Potentially hanging thread: Timer for 'HBase' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: nioEventLoopGroup-2-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-4 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: weak-ref-cleaner-strictcontextstorage java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:155) java.base@17.0.11/java.lang.ref.ReferenceQueue.remove(ReferenceQueue.java:176) app//io.opentelemetry.context.StrictContextStorage$PendingScopes.run(StrictContextStorage.java:269) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: sync.2 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) 
java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: nioEventLoopGroup-2-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.0@localhost:44189 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-4-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-4 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: org.apache.hadoop.hdfs.PeerCache@7fef2b31 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.PeerCache.run(PeerCache.java:253) app//org.apache.hadoop.hdfs.PeerCache.access$000(PeerCache.java:46) app//org.apache.hadoop.hdfs.PeerCache$1.run(PeerCache.java:124) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Async-Client-Retry-Timer-pool-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.waitForNextTick(HashedWheelTimer.java:598) app//org.apache.hbase.thirdparty.io.netty.util.HashedWheelTimer$Worker.run(HashedWheelTimer.java:494) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: sync.1 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.park(LockSupport.java:341) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionNode.block(AbstractQueuedSynchronizer.java:506) java.base@17.0.11/java.util.concurrent.ForkJoinPool.unmanagedBlock(ForkJoinPool.java:3465) 
java.base@17.0.11/java.util.concurrent.ForkJoinPool.managedBlock(ForkJoinPool.java:3436) java.base@17.0.11/java.util.concurrent.locks.AbstractQueuedSynchronizer$ConditionObject.await(AbstractQueuedSynchronizer.java:1625) java.base@17.0.11/java.util.concurrent.LinkedBlockingQueue.take(LinkedBlockingQueue.java:435) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.takeSyncRequest(FSHLog.java:426) app//org.apache.hadoop.hbase.regionserver.wal.FSHLog$SyncRunner.run(FSHLog.java:441) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:44189 from jenkins.hfs.0 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) - Thread LEAK? -, OpenFileDescriptor=402 (was 287) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=204 (was 288), ProcessCount=11 (was 11), AvailableMemoryMB=10091 (was 11456) 2024-11-24T02:52:37,556 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=78, OpenFileDescriptor=402, MaxFileDescriptor=1048576, SystemLoadAverage=204, ProcessCount=11, AvailableMemoryMB=10091 2024-11-24T02:52:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:52:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.log.dir so I do NOT create it in target/test-data/c124437b-69e6-c985-0013-ddffcb53e912 2024-11-24T02:52:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c4c12d94-5a7f-bb15-ae1d-53d3809aca9c/hadoop.tmp.dir so I do NOT create it in target/test-data/c124437b-69e6-c985-0013-ddffcb53e912 2024-11-24T02:52:37,556 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d, deleteOnExit=true 2024-11-24T02:52:37,556 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/test.cache.data in system properties and HBase conf 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir 
in system properties and HBase conf 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:52:37,557 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T02:52:37,557 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:52:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:52:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:52:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:52:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:52:37,558 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:52:37,559 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:52:37,573 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:52:37,936 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:37,942 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:37,943 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:37,943 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:37,943 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:52:37,944 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:37,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:37,944 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:38,040 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@55cb1221{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/java.io.tmpdir/jetty-localhost-39245-hadoop-hdfs-3_4_1-tests_jar-_-any-4122375873108382772/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:52:38,041 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:39245} 2024-11-24T02:52:38,041 INFO [Time-limited test {}] server.Server(415): Started @101782ms 2024-11-24T02:52:38,052 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:52:38,295 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:38,299 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:38,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:38,300 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:38,300 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:52:38,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:38,301 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:38,395 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4595827f{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/java.io.tmpdir/jetty-localhost-34225-hadoop-hdfs-3_4_1-tests_jar-_-any-4049086071803145777/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:38,395 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:34225} 2024-11-24T02:52:38,395 INFO [Time-limited test {}] server.Server(415): Started @102137ms 2024-11-24T02:52:38,397 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:38,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:38,437 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:38,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:38,438 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:38,438 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:52:38,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:38,439 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:38,533 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@da5059a{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/java.io.tmpdir/jetty-localhost-46837-hadoop-hdfs-3_4_1-tests_jar-_-any-2267555610780753573/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:38,534 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:46837} 2024-11-24T02:52:38,534 INFO [Time-limited test {}] server.Server(415): Started @102275ms 2024-11-24T02:52:38,536 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:39,553 WARN [Thread-445 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data1/current/BP-759610244-172.17.0.2-1732416757589/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:39,553 WARN [Thread-446 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data2/current/BP-759610244-172.17.0.2-1732416757589/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:39,573 WARN [Thread-409 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x906333789105058e with lease ID 0x1fb594a392c07a14: Processing first storage report for DS-1ef3c700-3b87-42af-8710-58e5648a39c3 from datanode DatanodeRegistration(127.0.0.1:41601, datanodeUuid=f55d45e7-1498-45d9-aa8a-85924ce6d1f8, infoPort=34287, infoSecurePort=0, ipcPort=40983, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589) 2024-11-24T02:52:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x906333789105058e with lease ID 0x1fb594a392c07a14: from storage DS-1ef3c700-3b87-42af-8710-58e5648a39c3 node DatanodeRegistration(127.0.0.1:41601, datanodeUuid=f55d45e7-1498-45d9-aa8a-85924ce6d1f8, infoPort=34287, infoSecurePort=0, ipcPort=40983, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:39,575 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x906333789105058e with lease ID 0x1fb594a392c07a14: Processing first storage report for DS-86438ee1-ecad-4aa2-8fb0-577fa9a066b9 from datanode DatanodeRegistration(127.0.0.1:41601, datanodeUuid=f55d45e7-1498-45d9-aa8a-85924ce6d1f8, infoPort=34287, infoSecurePort=0, ipcPort=40983, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589) 2024-11-24T02:52:39,576 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x906333789105058e with lease ID 0x1fb594a392c07a14: from storage DS-86438ee1-ecad-4aa2-8fb0-577fa9a066b9 node DatanodeRegistration(127.0.0.1:41601, datanodeUuid=f55d45e7-1498-45d9-aa8a-85924ce6d1f8, infoPort=34287, infoSecurePort=0, ipcPort=40983, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:39,689 WARN [Thread-457 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data4/current/BP-759610244-172.17.0.2-1732416757589/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:39,689 WARN [Thread-456 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data3/current/BP-759610244-172.17.0.2-1732416757589/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:39,710 WARN [Thread-432 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:39,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x674573a650be7ab1 with lease ID 0x1fb594a392c07a15: Processing first storage report for DS-a0a2b693-0296-457a-aa06-d775510c433b from datanode DatanodeRegistration(127.0.0.1:33051, datanodeUuid=3ea90afd-3b6d-4f43-be63-14dd5d1eb2d8, infoPort=36661, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589) 2024-11-24T02:52:39,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x674573a650be7ab1 with lease ID 0x1fb594a392c07a15: from storage DS-a0a2b693-0296-457a-aa06-d775510c433b node DatanodeRegistration(127.0.0.1:33051, datanodeUuid=3ea90afd-3b6d-4f43-be63-14dd5d1eb2d8, infoPort=36661, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:39,713 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x674573a650be7ab1 with lease ID 0x1fb594a392c07a15: Processing first storage report for DS-943f0d46-4506-46d1-a49e-c7f244ee72a3 from datanode DatanodeRegistration(127.0.0.1:33051, datanodeUuid=3ea90afd-3b6d-4f43-be63-14dd5d1eb2d8, infoPort=36661, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589) 2024-11-24T02:52:39,713 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x674573a650be7ab1 with lease ID 0x1fb594a392c07a15: from storage DS-943f0d46-4506-46d1-a49e-c7f244ee72a3 node DatanodeRegistration(127.0.0.1:33051, datanodeUuid=3ea90afd-3b6d-4f43-be63-14dd5d1eb2d8, infoPort=36661, infoSecurePort=0, ipcPort=36173, storageInfo=lv=-57;cid=testClusterID;nsid=1602963304;c=1732416757589), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:52:39,775 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912 2024-11-24T02:52:39,779 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/zookeeper_0, clientPort=59280, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:52:39,781 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59280 2024-11-24T02:52:39,781 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:39,784 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:39,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:52:39,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:52:39,795 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846 with version=8 2024-11-24T02:52:39,795 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:52:39,797 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:52:39,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:39,797 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:39,798 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:52:39,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:39,798 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:52:39,798 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:52:39,798 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:52:39,799 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:42653 2024-11-24T02:52:39,800 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:42653 connecting to ZooKeeper ensemble=127.0.0.1:59280 2024-11-24T02:52:39,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:426530x0, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:52:39,853 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:42653-0x1016ac1e38d0000 connected 2024-11-24T02:52:39,937 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:39,943 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:39,947 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:39,947 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846, hbase.cluster.distributed=false 2024-11-24T02:52:39,949 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:52:39,949 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=42653 2024-11-24T02:52:39,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=42653 2024-11-24T02:52:39,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=42653 2024-11-24T02:52:39,950 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=42653 2024-11-24T02:52:39,951 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=42653 2024-11-24T02:52:39,969 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:52:39,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:39,969 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:39,970 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:52:39,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:39,970 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:52:39,970 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:52:39,970 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:52:39,970 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41933 2024-11-24T02:52:39,972 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41933 connecting to ZooKeeper ensemble=127.0.0.1:59280 2024-11-24T02:52:39,973 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:39,975 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:39,989 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419330x0, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:52:39,989 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419330x0, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:39,989 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41933-0x1016ac1e38d0001 connected 2024-11-24T02:52:39,990 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:52:39,991 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:52:39,991 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:52:39,992 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:52:39,996 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41933 2024-11-24T02:52:39,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41933 2024-11-24T02:52:39,998 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41933 2024-11-24T02:52:39,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41933 2024-11-24T02:52:39,999 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41933 2024-11-24T02:52:40,010 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:42653 2024-11-24T02:52:40,011 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:40,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:40,021 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:52:40,031 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,031 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:52:40,032 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,42653,1732416759797 from backup master directory 2024-11-24T02:52:40,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:40,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,041 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
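
The watcher events above all refer to znodes under baseZNode=/hbase on the MiniZooKeeperCluster quorum 127.0.0.1:59280. For orientation only, here is a minimal sketch of listing those znodes with the stock ZooKeeper client; the connect string and path are taken from this log, everything else is illustrative and not part of the test that produced it (the clientPort is ephemeral and differs on every run):

    import java.util.List;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkInspectSketch {
      public static void main(String[] args) throws Exception {
        // Connect string matches the MiniZooKeeperCluster client port reported earlier in this log.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59280", 30000, event -> {});
        // List the children of the baseZNode used by this cluster.
        List<String> children = zk.getChildren("/hbase", false);
        children.forEach(System.out::println);
        zk.close();
      }
    }

At this point in the startup one would typically see children such as master, backup-masters, running and acl, matching the paths named in the events above.
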
2024-11-24T02:52:40,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:40,041 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,047 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/hbase.id] with ID: 5ee60aaa-1af6-42e2-9364-84a9e45ba0b1 2024-11-24T02:52:40,047 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/.tmp/hbase.id 2024-11-24T02:52:40,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:52:40,054 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:52:40,054 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/.tmp/hbase.id]:[hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/hbase.id] 2024-11-24T02:52:40,068 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:40,068 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T02:52:40,070 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
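
Everything from the DataNode storage reports down to the active-master registration and cluster ID file above is the bootstrap that HBase's test utility drives when a test asks for a mini cluster. A rough sketch of that entry point follows, assuming HBaseTestingUtil keeps the startMiniCluster()/createTable()/shutdownMiniCluster() surface of the older HBaseTestingUtility; it is not taken from the test that produced this log:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Table;

    public class MiniClusterSketch {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        // Brings up a MiniDFSCluster, a MiniZooKeeperCluster, one master and one regionserver.
        util.startMiniCluster();
        try (Table table = util.createTable(TableName.valueOf("sketch"), "cf")) {
          // Table name and family are illustrative; exercise the cluster here.
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }

startMiniCluster() is what ultimately produces the kind of DataNode, ZooKeeper, master and regionserver startup chatter recorded in this log.
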
2024-11-24T02:52:40,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,083 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:52:40,092 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:52:40,496 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:52:40,499 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:52:40,500 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:52:40,514 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:52:40,515 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store 2024-11-24T02:52:40,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:52:40,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:52:40,524 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:40,524 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:52:40,525 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:40,525 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:40,525 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:52:40,525 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:40,525 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
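
The column-family attributes printed for 'master:store' above ('info' with VERSIONS => '3', IN_MEMORY => 'true', BLOCKSIZE => '8192 B (8KB)', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', BLOOMFILTER => 'ROWCOL', and so on) correspond to the public descriptor-builder API. A small sketch expressing just the 'info' family that way; the table name is hypothetical and nothing here is taken from the test code itself:

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class DescriptorSketch {
      public static void main(String[] args) {
        TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("example"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.newBuilder(Bytes.toBytes("info"))
                .setMaxVersions(3)                                     // VERSIONS => '3'
                .setInMemory(true)                                     // IN_MEMORY => 'true'
                .setBlocksize(8 * 1024)                                // BLOCKSIZE => '8192 B (8KB)'
                .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)  // DATA_BLOCK_ENCODING
                .setBloomFilterType(BloomType.ROWCOL)                  // BLOOMFILTER => 'ROWCOL'
                .build())
            .build();
        System.out.println(td);
      }
    }

Printing the resulting descriptor yields roughly the same {NAME => ...} rendering that appears in these log lines.
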
2024-11-24T02:52:40,525 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416760524Disabling compacts and flushes for region at 1732416760524Disabling writes for close at 1732416760525 (+1 ms)Writing region close event to WAL at 1732416760525Closed at 1732416760525 2024-11-24T02:52:40,526 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/.initializing 2024-11-24T02:52:40,526 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/WALs/7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,531 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C42653%2C1732416759797, suffix=, logDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/WALs/7c69a60bd8f6,42653,1732416759797, archiveDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/oldWALs, maxLogs=10 2024-11-24T02:52:40,531 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C42653%2C1732416759797.1732416760531 2024-11-24T02:52:40,538 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/WALs/7c69a60bd8f6,42653,1732416759797/7c69a60bd8f6%2C42653%2C1732416759797.1732416760531 2024-11-24T02:52:40,540 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36661:36661),(127.0.0.1/127.0.0.1:34287:34287)] 2024-11-24T02:52:40,541 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:52:40,541 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:40,541 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,541 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,544 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,546 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:52:40,547 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,547 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:40,548 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,551 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:52:40,551 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:40,552 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,556 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:52:40,556 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,557 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:40,558 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,559 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:52:40,559 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,560 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:40,560 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,561 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,561 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,563 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,563 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,563 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:52:40,565 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:40,567 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:52:40,567 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=726870, jitterRate=-0.07573765516281128}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:52:40,569 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416760542Initializing all the Stores at 1732416760543 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416760543Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416760543Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416760543Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416760543Cleaning up temporary data from old regions at 1732416760563 (+20 ms)Region opened successfully at 1732416760568 (+5 ms) 2024-11-24T02:52:40,569 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:52:40,573 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4efa4893, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:52:40,574 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:52:40,574 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:52:40,574 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:52:40,574 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:52:40,575 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T02:52:40,575 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T02:52:40,576 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:52:40,578 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T02:52:40,579 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:52:40,631 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:52:40,632 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:52:40,634 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:52:40,641 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:52:40,642 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:52:40,644 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:52:40,652 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:52:40,654 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:52:40,662 DEBUG 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:52:40,667 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:52:40,673 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:52:40,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:40,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:40,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,684 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,685 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,42653,1732416759797, sessionid=0x1016ac1e38d0000, setting cluster-up flag (Was=false) 2024-11-24T02:52:40,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,704 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,736 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:52:40,739 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,852 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,853 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:40,884 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:52:40,888 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:40,890 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:52:40,893 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:40,893 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:52:40,894 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T02:52:40,894 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,42653,1732416759797 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:52:40,896 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:52:40,897 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T02:52:40,898 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416790898 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:52:40,899 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,900 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:52:40,900 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:40,900 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:52:40,900 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:52:40,900 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:52:40,901 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:52:40,901 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:52:40,901 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416760901,5,FailOnTimeoutGroup] 2024-11-24T02:52:40,902 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416760901,5,FailOnTimeoutGroup] 2024-11-24T02:52:40,902 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,902 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:52:40,902 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,902 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(746): ClusterId : 5ee60aaa-1af6-42e2-9364-84a9e45ba0b1 2024-11-24T02:52:40,902 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,902 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:52:40,902 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,902 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:52:40,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:52:40,911 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:52:40,912 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:52:40,912 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => 
{'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846 2024-11-24T02:52:40,916 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:52:40,916 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:52:40,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:52:40,919 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:52:40,920 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:40,921 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:52:40,923 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:52:40,923 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl 
org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:40,924 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:52:40,926 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:52:40,926 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,926 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:52:40,926 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:40,927 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:52:40,927 DEBUG [RS:0;7c69a60bd8f6:41933 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7f00d9b9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:52:40,928 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:52:40,928 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:40,929 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:52:40,931 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:52:40,931 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:40,932 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:40,932 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:52:40,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740 2024-11-24T02:52:40,933 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740 2024-11-24T02:52:40,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:52:40,935 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:52:40,935 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
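The FlushLargeStoresPolicy entry just above notes that hbase.hregion.percolumnfamilyflush.size.lower.bound is not set on hbase:meta, so the per-family flush lower bound falls back to the region's memstore flush heap size divided by the number of column families (16.0 M here, i.e. flushSizeLowerBound=16777216). The following is a minimal sketch of that fallback arithmetic in plain Java, not HBase's own FlushLargeStoresPolicy code; the property name and the 16 MB result come from the log, while the 64 MB flush size and all class/method names are assumptions for illustration.

```java
// Hypothetical sketch of the fallback described in the FlushLargeStoresPolicy log line:
// if hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table
// descriptor, the per-family lower bound becomes memstore-flush-heap-size / #families.
public class FlushLowerBoundSketch {

    static long perFamilyLowerBound(Long configuredLowerBound,
                                    long memstoreFlushHeapSize,
                                    int numColumnFamilies) {
        if (configuredLowerBound != null) {
            return configuredLowerBound;               // explicit table-level setting wins
        }
        return memstoreFlushHeapSize / numColumnFamilies; // fallback reported in the log
    }

    public static void main(String[] args) {
        // hbase:meta has four families (info, ns, rep_barrier, table); the log reports a
        // computed lower bound of 16 MB, so a 64 MB flush size is assumed for illustration.
        long assumedFlushSize = 64L * 1024 * 1024;
        System.out.println(perFamilyLowerBound(null, assumedFlushSize, 4)); // 16777216
    }
}
```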
2024-11-24T02:52:40,937 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:52:40,939 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:52:40,940 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:41933 2024-11-24T02:52:40,940 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879163, jitterRate=0.11791464686393738}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:52:40,940 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:52:40,940 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:52:40,940 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T02:52:40,941 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,42653,1732416759797 with port=41933, startcode=1732416759969 2024-11-24T02:52:40,941 DEBUG [RS:0;7c69a60bd8f6:41933 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:52:40,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416760920Initializing all the Stores at 1732416760921 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416760921Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416760921Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416760921Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416760921Cleaning up temporary data from old regions at 1732416760935 (+14 ms)Region opened successfully at 1732416760942 (+7 ms) 2024-11-24T02:52:40,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, 
disabling compactions & flushes 2024-11-24T02:52:40,942 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:52:40,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:52:40,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:52:40,942 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:52:40,943 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:52:40,943 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416760942Disabling compacts and flushes for region at 1732416760942Disabling writes for close at 1732416760942Writing region close event to WAL at 1732416760942Closed at 1732416760943 (+1 ms) 2024-11-24T02:52:40,944 INFO [HMaster-EventLoopGroup-5-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60085, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.1 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:52:40,944 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:40,944 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:52:40,944 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42653 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:40,944 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=42653 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:40,944 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:52:40,946 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:52:40,946 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846 2024-11-24T02:52:40,946 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36039 2024-11-24T02:52:40,946 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:52:40,947 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:52:40,957 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:52:40,958 DEBUG 
[RS:0;7c69a60bd8f6:41933 {}] zookeeper.ZKUtil(111): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:40,958 WARN [RS:0;7c69a60bd8f6:41933 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:52:40,958 INFO [RS:0;7c69a60bd8f6:41933 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:40,958 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/WALs/7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:40,958 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,41933,1732416759969] 2024-11-24T02:52:40,961 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:52:40,964 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:52:40,964 INFO [RS:0;7c69a60bd8f6:41933 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:52:40,964 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,964 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:52:40,965 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:52:40,965 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:40,966 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:52:40,967 DEBUG [RS:0;7c69a60bd8f6:41933 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:52:40,968 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,968 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,968 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,968 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
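The ChoreService entries above (CompactionChecker every 1000 ms, MemstoreFlusherChore, ExecutorStatusChore, and so on) are periodic background tasks scheduled on the region server. The sketch below illustrates the same fixed-period pattern using plain java.util.concurrent rather than HBase's ScheduledChore/ChoreService classes; the 1000 ms period mirrors the log, and everything else is hypothetical.

```java
import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

// Illustrative stand-in for the chore pattern reported above (e.g. CompactionChecker,
// period=1000, unit=MILLISECONDS). Uses java.util.concurrent, not HBase's ChoreService,
// so the names here are illustrative only.
public class ChoreSketch {
    public static void main(String[] args) throws InterruptedException {
        ScheduledExecutorService chorePool = Executors.newSingleThreadScheduledExecutor();
        // Fixed-delay task mirroring "period=1000, unit=MILLISECONDS" from the log.
        chorePool.scheduleWithFixedDelay(
            () -> System.out.println("chore tick: check stores for compaction"),
            0, 1000, TimeUnit.MILLISECONDS);
        TimeUnit.SECONDS.sleep(3);   // let a few ticks run, then shut down
        chorePool.shutdownNow();
    }
}
```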
2024-11-24T02:52:40,968 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,968 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41933,1732416759969-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:52:40,982 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:52:40,983 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41933,1732416759969-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,983 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,983 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.Replication(171): 7c69a60bd8f6,41933,1732416759969 started 2024-11-24T02:52:40,998 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:40,998 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,41933,1732416759969, RpcServer on 7c69a60bd8f6/172.17.0.2:41933, sessionid=0x1016ac1e38d0001 2024-11-24T02:52:40,998 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:52:40,998 DEBUG [RS:0;7c69a60bd8f6:41933 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:40,998 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,41933,1732416759969' 2024-11-24T02:52:40,998 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:52:40,999 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:52:41,000 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:52:41,000 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:52:41,000 DEBUG [RS:0;7c69a60bd8f6:41933 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:41,000 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,41933,1732416759969' 2024-11-24T02:52:41,000 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:52:41,000 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:52:41,001 DEBUG [RS:0;7c69a60bd8f6:41933 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:52:41,001 INFO [RS:0;7c69a60bd8f6:41933 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:52:41,001 INFO [RS:0;7c69a60bd8f6:41933 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T02:52:41,098 WARN [7c69a60bd8f6:42653 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T02:52:41,104 INFO [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C41933%2C1732416759969, suffix=, logDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/WALs/7c69a60bd8f6,41933,1732416759969, archiveDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/oldWALs, maxLogs=32 2024-11-24T02:52:41,107 INFO [RS:0;7c69a60bd8f6:41933 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41933%2C1732416759969.1732416761107 2024-11-24T02:52:41,116 INFO [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/WALs/7c69a60bd8f6,41933,1732416759969/7c69a60bd8f6%2C41933%2C1732416759969.1732416761107 2024-11-24T02:52:41,117 DEBUG [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36661:36661),(127.0.0.1/127.0.0.1:34287:34287)] 2024-11-24T02:52:41,348 DEBUG [7c69a60bd8f6:42653 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:52:41,350 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:41,354 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,41933,1732416759969, state=OPENING 2024-11-24T02:52:41,431 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:52:41,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:41,442 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:41,444 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:52:41,444 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:41,444 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,41933,1732416759969}] 2024-11-24T02:52:41,444 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:41,491 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:41,500 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:41,602 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:52:41,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58431, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:52:41,614 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:52:41,614 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:41,619 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C41933%2C1732416759969.meta, suffix=.meta, logDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/WALs/7c69a60bd8f6,41933,1732416759969, archiveDir=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/oldWALs, maxLogs=32 2024-11-24T02:52:41,622 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41933%2C1732416759969.meta.1732416761622.meta 2024-11-24T02:52:41,628 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/WALs/7c69a60bd8f6,41933,1732416759969/7c69a60bd8f6%2C41933%2C1732416759969.meta.1732416761622.meta 2024-11-24T02:52:41,629 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:34287:34287),(127.0.0.1/127.0.0.1:36661:36661)] 2024-11-24T02:52:41,630 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:52:41,630 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:52:41,630 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:52:41,631 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
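The AbstractFSWAL configuration lines above report "blocksize=256 MB, rollsize=128 MB, ..., maxLogs=32" for both the regular and the meta WAL: the roll size is the WAL block size scaled by a roll multiplier (0.5 here), and maxLogs caps how many un-archived WAL files may accumulate. The snippet below is a hedged sketch of the standard HBase settings behind those numbers; the concrete values simply mirror this log, and the class name is made up for illustration.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the knobs behind "blocksize=256 MB, rollsize=128 MB, maxLogs=32" in the
// WAL configuration lines above: rollsize is blocksize scaled by the roll multiplier.
public class WalRollConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setLong("hbase.regionserver.hlog.blocksize", 256L * 1024 * 1024);
        conf.setFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        conf.setInt("hbase.regionserver.maxlogs", 32);

        long blockSize = conf.getLong("hbase.regionserver.hlog.blocksize", 0);
        float multiplier = conf.getFloat("hbase.regionserver.logroll.multiplier", 0.5f);
        // 256 MB * 0.5 = 128 MB, matching the logged rollsize.
        System.out.println("rollsize = " + (long) (blockSize * multiplier) + " bytes");
    }
}
```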
2024-11-24T02:52:41,631 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:52:41,631 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:41,631 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:52:41,631 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:52:41,633 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:52:41,634 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:52:41,634 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:41,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:41,634 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:52:41,635 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:52:41,635 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:41,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:41,636 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:52:41,638 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:52:41,638 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:41,638 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:41,639 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:52:41,640 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:52:41,640 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:41,641 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
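Each CompactionConfiguration line above repeats, per column family, the effective compaction parameters: minFilesToCompact:3, maxFilesToCompact:10, ratio 1.200000, off-peak ratio 5.000000. These correspond to the standard store compaction settings shown in the sketch below; the values are copied from the log, and the snippet only demonstrates the configuration keys, not the ExploringCompactionPolicy itself.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

// Sketch of the settings behind the CompactionConfiguration lines above
// (minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2, off-peak ratio 5.0).
public class CompactionConfigSketch {
    public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();
        conf.setInt("hbase.hstore.compaction.min", 3);        // minFilesToCompact
        conf.setInt("hbase.hstore.compaction.max", 10);       // maxFilesToCompact
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);
        System.out.println("compaction ratio = "
            + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
    }
}
```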
2024-11-24T02:52:41,641 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:52:41,642 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740 2024-11-24T02:52:41,644 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740 2024-11-24T02:52:41,646 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:52:41,646 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:52:41,647 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:52:41,649 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:52:41,650 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=747014, jitterRate=-0.05012373626232147}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:52:41,650 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:52:41,651 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416761631Writing region info on filesystem at 1732416761631Initializing all the Stores at 1732416761632 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416761632Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416761632Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION 
=> 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416761632Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416761632Cleaning up temporary data from old regions at 1732416761646 (+14 ms)Running coprocessor post-open hooks at 1732416761650 (+4 ms)Region opened successfully at 1732416761651 (+1 ms) 2024-11-24T02:52:41,652 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416761601 2024-11-24T02:52:41,655 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:52:41,655 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:52:41,656 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:41,657 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,41933,1732416759969, state=OPEN 2024-11-24T02:52:41,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:52:41,695 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:52:41,695 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:41,696 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:41,696 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:41,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:52:41,701 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,41933,1732416759969 in 252 msec 2024-11-24T02:52:41,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:52:41,706 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 757 msec 2024-11-24T02:52:41,707 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): 
Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:41,707 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:52:41,710 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:52:41,710 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,41933,1732416759969, seqNum=-1] 2024-11-24T02:52:41,710 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:52:41,712 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58483, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:52:41,721 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 827 msec 2024-11-24T02:52:41,721 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416761721, completionTime=-1 2024-11-24T02:52:41,721 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:52:41,722 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:52:41,724 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:52:41,724 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416821724 2024-11-24T02:52:41,724 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732416881724 2024-11-24T02:52:41,724 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T02:52:41,724 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,42653,1732416759797-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:41,724 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,42653,1732416759797-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:41,725 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,42653,1732416759797-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:41,725 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:42653, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:41,725 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:41,725 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:41,727 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.689sec 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:52:41,731 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,42653,1732416759797-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:52:41,732 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,42653,1732416759797-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:52:41,735 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:52:41,735 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:52:41,735 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,42653,1732416759797-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:41,803 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@6512930c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:52:41,803 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,42653,-1 for getting cluster id 2024-11-24T02:52:41,804 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:52:41,806 DEBUG [HMaster-EventLoopGroup-5-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '5ee60aaa-1af6-42e2-9364-84a9e45ba0b1' 2024-11-24T02:52:41,806 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:52:41,806 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "5ee60aaa-1af6-42e2-9364-84a9e45ba0b1" 2024-11-24T02:52:41,807 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@60420209, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:52:41,807 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,42653,-1] 2024-11-24T02:52:41,807 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:52:41,808 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:41,809 INFO [HMaster-EventLoopGroup-5-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:38420, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:52:41,810 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bd69191, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:52:41,811 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:52:41,812 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,41933,1732416759969, seqNum=-1] 2024-11-24T02:52:41,812 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:52:41,814 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-6-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42976, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:52:41,816 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:41,817 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:41,820 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:52:41,820 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:52:41,821 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T02:52:41,821 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at 
org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:52:41,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:41,821 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:41,821 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T02:52:41,821 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:52:41,821 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=604601029, stopped=false 2024-11-24T02:52:41,821 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,42653,1732416759797 2024-11-24T02:52:41,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:41,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:41,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:41,841 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:41,841 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:52:41,842 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
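The call stacks above come from AbstractTestLogRolling.tearDown shutting the minicluster down via HBaseTestingUtil after TestLogRolling.testLogRollOnDatanodeDeath ("Minicluster is up" earlier, "Shutting down minicluster" here). The sketch below outlines that lifecycle in a hedged way: the class and the startMiniCluster/shutdownMiniCluster calls are taken from the stack trace and log messages, while the surrounding scaffolding and the empty test body are assumptions.

```java
import org.apache.hadoop.hbase.HBaseTestingUtil;

// Hedged sketch of the minicluster lifecycle this log records: start a single-master,
// single-regionserver cluster, run the test body, then shut everything down in tearDown
// (see the HBaseTestingUtil frames in the stack traces above).
public class MiniClusterLifecycleSketch {
    public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        util.startMiniCluster();          // "Minicluster is up" in the log
        try {
            // ... test body would write data, roll WALs, kill a datanode, etc. ...
        } finally {
            util.shutdownMiniCluster();   // "Shutting down minicluster" in the log
        }
    }
}
```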
2024-11-24T02:52:41,842 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at org.apache.hadoop.hbase.regionserver.wal.TestLogRolling.testLogRollOnDatanodeDeath(TestLogRolling.java:201) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.InvokeMethod.evaluate(InvokeMethod.java:17) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at 
java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:52:41,842 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:41,842 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:41,842 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:41,842 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,41933,1732416759969' ***** 2024-11-24T02:52:41,842 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:52:41,842 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:52:41,843 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:41933. 2024-11-24T02:52:41,843 DEBUG [RS:0;7c69a60bd8f6:41933 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:52:41,843 DEBUG [RS:0;7c69a60bd8f6:41933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 
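The repeated "Connection has been closed by ..." entries come from AsyncConnectionImpl.close() being invoked by the test, the master and the region server as each releases its cluster connection. For orientation, a hedged sketch of the client-side equivalent using the public ConnectionFactory API (configuration is taken from the classpath; nothing here is specific to this test run):

    import java.util.concurrent.CompletableFuture;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.client.AsyncConnection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class AsyncConnectionCloseExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // createAsyncConnection completes asynchronously; get() waits for it.
        CompletableFuture<AsyncConnection> future = ConnectionFactory.createAsyncConnection(conf);
        // try-with-resources guarantees close(), which is what produces the
        // "Connection has been closed by ..." log lines above.
        try (AsyncConnection conn = future.get()) {
          conn.getAdmin(); // a real caller would issue admin or table requests here
        }
      }
    }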
2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:52:41,843 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T02:52:41,844 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T02:52:41,844 DEBUG [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T02:52:41,844 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:52:41,844 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:52:41,844 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:52:41,844 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:52:41,844 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:52:41,844 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T02:52:41,860 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/.tmp/ns/c117edb326f246c6a84039e5f5d7a165 is 43, key is default/ns:d/1732416761713/Put/seqid=0 2024-11-24T02:52:41,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741835_1011 (size=5153) 2024-11-24T02:52:41,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741835_1011 (size=5153) 2024-11-24T02:52:41,867 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/.tmp/ns/c117edb326f246c6a84039e5f5d7a165 2024-11-24T02:52:41,875 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/.tmp/ns/c117edb326f246c6a84039e5f5d7a165 as hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/ns/c117edb326f246c6a84039e5f5d7a165 2024-11-24T02:52:41,882 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/ns/c117edb326f246c6a84039e5f5d7a165, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T02:52:41,883 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false 2024-11-24T02:52:41,883 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T02:52:41,889 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T02:52:41,890 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:52:41,890 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:52:41,890 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416761844Running coprocessor pre-close hooks at 1732416761844Disabling compacts and flushes for region at 1732416761844Disabling writes for close at 1732416761844Obtaining lock to block concurrent updates at 1732416761844Preparing flush snapshotting stores in 1588230740 at 1732416761844Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732416761844Flushing stores of hbase:meta,,1.1588230740 at 1732416761845 (+1 ms)Flushing 1588230740/ns: creating writer at 1732416761845Flushing 1588230740/ns: appending metadata at 1732416761859 (+14 ms)Flushing 1588230740/ns: closing flushed file at 1732416761860 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@45a62f1e: reopening flushed file at 1732416761874 (+14 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 39ms, sequenceid=6, compaction requested=false at 1732416761883 (+9 ms)Writing region close event to WAL at 1732416761885 (+2 ms)Running coprocessor post-close hooks at 1732416761890 (+5 ms)Closed at 1732416761890 2024-11-24T02:52:41,890 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:52:41,969 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T02:52:41,969 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T02:52:42,017 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:52:42,028 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:42,044 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,41933,1732416759969; all regions closed. 2024-11-24T02:52:42,044 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,044 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,045 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,045 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,045 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741834_1010 (size=1152) 2024-11-24T02:52:42,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741834_1010 (size=1152) 2024-11-24T02:52:42,049 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:42,051 DEBUG [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/oldWALs 2024-11-24T02:52:42,052 INFO [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C41933%2C1732416759969.meta:.meta(num 1732416761622) 2024-11-24T02:52:42,052 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,052 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,052 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,052 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,052 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741833_1009 (size=93) 2024-11-24T02:52:42,055 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741833_1009 (size=93) 2024-11-24T02:52:42,058 DEBUG [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/oldWALs 2024-11-24T02:52:42,058 INFO [RS:0;7c69a60bd8f6:41933 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C41933%2C1732416759969:(num 1732416761107) 2024-11-24T02:52:42,059 DEBUG [RS:0;7c69a60bd8f6:41933 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:42,059 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:52:42,059 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:52:42,059 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T02:52:42,059 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:52:42,059 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] 
wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:52:42,059 INFO [RS:0;7c69a60bd8f6:41933 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41933 2024-11-24T02:52:42,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,41933,1732416759969 2024-11-24T02:52:42,071 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:52:42,071 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:52:42,072 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,41933,1732416759969] 2024-11-24T02:52:42,094 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,41933,1732416759969 already deleted, retry=false 2024-11-24T02:52:42,094 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,41933,1732416759969 expired; onlineServers=0 2024-11-24T02:52:42,094 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,42653,1732416759797' ***** 2024-11-24T02:52:42,094 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:52:42,094 INFO [M:0;7c69a60bd8f6:42653 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:52:42,094 INFO [M:0;7c69a60bd8f6:42653 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:52:42,094 DEBUG [M:0;7c69a60bd8f6:42653 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:52:42,094 DEBUG [M:0;7c69a60bd8f6:42653 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:52:42,094 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
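The chore-related lines above and just below (CompactionChecker and MemstoreFlusherChore stopped, the LogCleaner/HFileCleaner threads exiting, the ChoreService reporting which ScheduledChore instances it still held on shutdown) all refer to HBase's ScheduledChore/ChoreService machinery. A rough sketch under the assumption that the ScheduledChore and ChoreService classes behave as in the HBase source; the chore name, period and pool prefix are made up:

    import org.apache.hadoop.hbase.ChoreService;
    import org.apache.hadoop.hbase.ScheduledChore;
    import org.apache.hadoop.hbase.Stoppable;

    public class ChoreExample {
      public static void main(String[] args) throws InterruptedException {
        Stoppable stopper = new Stoppable() {
          private volatile boolean stopped;
          @Override public void stop(String why) { stopped = true; }
          @Override public boolean isStopped() { return stopped; }
        };

        // Illustrative chore; real chores such as CompactionChecker are created
        // by the region server with its own configuration and period.
        ScheduledChore chore = new ScheduledChore("ExampleChore", stopper, 1000) {
          @Override protected void chore() {
            System.out.println("chore tick");
          }
        };

        ChoreService service = new ChoreService("example");
        service.scheduleChore(chore);
        Thread.sleep(3000);
        // Shutting the service down cancels outstanding chores, which is what the
        // "Chore: ... was stopped" / "had [ScheduledChore ...] on shutdown" lines record.
        service.shutdown();
      }
    }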
2024-11-24T02:52:42,094 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416760901 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416760901,5,FailOnTimeoutGroup] 2024-11-24T02:52:42,094 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416760901 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416760901,5,FailOnTimeoutGroup] 2024-11-24T02:52:42,095 INFO [M:0;7c69a60bd8f6:42653 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:52:42,095 INFO [M:0;7c69a60bd8f6:42653 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:52:42,095 DEBUG [M:0;7c69a60bd8f6:42653 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:52:42,095 INFO [M:0;7c69a60bd8f6:42653 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:52:42,095 INFO [M:0;7c69a60bd8f6:42653 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:52:42,095 INFO [M:0;7c69a60bd8f6:42653 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:52:42,095 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T02:52:42,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:52:42,104 DEBUG [M:0;7c69a60bd8f6:42653 {}] zookeeper.ZKUtil(347): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:52:42,104 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:42,104 WARN [M:0;7c69a60bd8f6:42653 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:52:42,105 INFO [M:0;7c69a60bd8f6:42653 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/.lastflushedseqids 2024-11-24T02:52:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741836_1012 (size=99) 2024-11-24T02:52:42,112 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741836_1012 (size=99) 2024-11-24T02:52:42,112 INFO [M:0;7c69a60bd8f6:42653 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:52:42,112 INFO [M:0;7c69a60bd8f6:42653 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:52:42,112 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:52:42,112 INFO [M:0;7c69a60bd8f6:42653 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:42,113 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:42,113 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:52:42,113 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:42,113 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T02:52:42,129 DEBUG [M:0;7c69a60bd8f6:42653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b675f9abab874853a1a4e0c05347cc58 is 82, key is hbase:meta,,1/info:regioninfo/1732416761656/Put/seqid=0 2024-11-24T02:52:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741837_1013 (size=5672) 2024-11-24T02:52:42,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741837_1013 (size=5672) 2024-11-24T02:52:42,137 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b675f9abab874853a1a4e0c05347cc58 2024-11-24T02:52:42,159 DEBUG [M:0;7c69a60bd8f6:42653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db289dcedc2944cda667295595444288 is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732416761720/Put/seqid=0 2024-11-24T02:52:42,164 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741838_1014 (size=5275) 2024-11-24T02:52:42,165 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741838_1014 (size=5275) 2024-11-24T02:52:42,169 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db289dcedc2944cda667295595444288 2024-11-24T02:52:42,183 INFO [RS:0;7c69a60bd8f6:41933 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:52:42,183 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:42,183 INFO [RS:0;7c69a60bd8f6:41933 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,41933,1732416759969; zookeeper connection closed. 
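The HFileWriterImpl messages above print the largest flushed cell's key in the form row/family:qualifier/timestamp/type (for example hbase:meta,,1/info:regioninfo/1732416761656/Put). Those particular cells are written internally by the master, but the same coordinates apply to any client write. A minimal sketch using the standard client API; the table name, row and value below are placeholders, not anything present in this log:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;
    import org.apache.hadoop.hbase.client.Put;
    import org.apache.hadoop.hbase.client.Table;
    import org.apache.hadoop.hbase.util.Bytes;

    public class PutExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(TableName.valueOf("example"))) {
          // Cell coordinates: row "r1", family "info", qualifier "regioninfo",
          // explicit timestamp, value bytes -- matching the key layout printed
          // by HFileWriterImpl in the flush messages above.
          Put put = new Put(Bytes.toBytes("r1"));
          put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("regioninfo"),
              1732416761656L, Bytes.toBytes("value"));
          table.put(put);
        }
      }
    }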
2024-11-24T02:52:42,184 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41933-0x1016ac1e38d0001, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:42,184 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@7cf21c7f 2024-11-24T02:52:42,184 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T02:52:42,191 DEBUG [M:0;7c69a60bd8f6:42653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7322629546d242d7bc2ac39fa649069f is 69, key is 7c69a60bd8f6,41933,1732416759969/rs:state/1732416760945/Put/seqid=0 2024-11-24T02:52:42,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741839_1015 (size=5156) 2024-11-24T02:52:42,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741839_1015 (size=5156) 2024-11-24T02:52:42,600 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7322629546d242d7bc2ac39fa649069f 2024-11-24T02:52:42,633 DEBUG [M:0;7c69a60bd8f6:42653 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c91d0563dea409ab07851fd7acd1c5f is 52, key is load_balancer_on/state:d/1732416761819/Put/seqid=0 2024-11-24T02:52:42,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741840_1016 (size=5056) 2024-11-24T02:52:42,638 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741840_1016 (size=5056) 2024-11-24T02:52:42,638 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c91d0563dea409ab07851fd7acd1c5f 2024-11-24T02:52:42,645 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/b675f9abab874853a1a4e0c05347cc58 as hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b675f9abab874853a1a4e0c05347cc58 2024-11-24T02:52:42,651 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/b675f9abab874853a1a4e0c05347cc58, entries=8, sequenceid=29, 
filesize=5.5 K 2024-11-24T02:52:42,653 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/db289dcedc2944cda667295595444288 as hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/db289dcedc2944cda667295595444288 2024-11-24T02:52:42,659 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/db289dcedc2944cda667295595444288, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T02:52:42,660 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/7322629546d242d7bc2ac39fa649069f as hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7322629546d242d7bc2ac39fa649069f 2024-11-24T02:52:42,666 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/7322629546d242d7bc2ac39fa649069f, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T02:52:42,668 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/1c91d0563dea409ab07851fd7acd1c5f as hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c91d0563dea409ab07851fd7acd1c5f 2024-11-24T02:52:42,674 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36039/user/jenkins/test-data/bc2a6fc2-f600-1a13-28fe-9e319a8f4846/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/1c91d0563dea409ab07851fd7acd1c5f, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T02:52:42,675 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=29, compaction requested=false 2024-11-24T02:52:42,677 INFO [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
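The "Committing ... .tmp/... as ..." lines show the write-to-temp-then-rename pattern used when a flushed file is moved from the region's .tmp directory into its column-family directory; HBase performs this through HRegionFileSystem internally. A small sketch of the same idiom on a Hadoop FileSystem, with made-up paths, purely to illustrate why the commit step is cheap and effectively atomic on HDFS:

    import java.io.IOException;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class TmpCommitExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(conf);

        // Illustrative paths only; the real layout under data/<ns>/<table>/<region>/<cf>/
        // is managed by HRegionFileSystem.
        Path tmp = new Path("/example/region/.tmp/cf/abc123");
        Path dst = new Path("/example/region/cf/abc123");

        try (FSDataOutputStream out = fs.create(tmp, true)) {
          out.writeBytes("flushed cells would go here");
        }
        // Committing is a metadata-only rename, so the file appears in the store
        // in one step -- the same idea behind the "Committing ... as ..." lines.
        if (!fs.rename(tmp, dst)) {
          throw new IOException("rename failed: " + tmp + " -> " + dst);
        }
      }
    }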
2024-11-24T02:52:42,677 DEBUG [M:0;7c69a60bd8f6:42653 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416762112Disabling compacts and flushes for region at 1732416762112Disabling writes for close at 1732416762113 (+1 ms)Obtaining lock to block concurrent updates at 1732416762113Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416762113Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732416762113Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732416762114 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416762114Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416762129 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416762129Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416762144 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416762159 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416762159Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416762175 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416762191 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416762191Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416762615 (+424 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416762632 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416762632Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4896e021: reopening flushed file at 1732416762644 (+12 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@48493406: reopening flushed file at 1732416762651 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@73dbc758: reopening flushed file at 1732416762659 (+8 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@7197ec6f: reopening flushed file at 1732416762667 (+8 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 562ms, sequenceid=29, compaction requested=false at 1732416762675 (+8 ms)Writing region close event to WAL at 1732416762677 (+2 ms)Closed at 1732416762677 2024-11-24T02:52:42,677 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,677 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,678 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,678 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,678 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:42,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:41601 is added to blk_1073741830_1006 (size=10311) 2024-11-24T02:52:42,680 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33051 is added to blk_1073741830_1006 (size=10311) 2024-11-24T02:52:42,681 INFO [M:0;7c69a60bd8f6:42653 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
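The interrupted FSHLog sync runners above and the WAL-Roller thread that exits just below belong to the write-ahead-log machinery this test (TestLogRolling) exercises. For reference, a hedged sketch of how a client can ask a region server to roll its WAL through the Admin API; the server name is copied from this log and is only meaningful inside this particular run:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalExample {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Server name format is host,port,startcode.
          ServerName rs = ServerName.valueOf("7c69a60bd8f6,41933,1732416759969");
          admin.rollWALWriter(rs); // asks the region server to close and replace its current WAL
        }
      }
    }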
2024-11-24T02:52:42,681 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:52:42,681 INFO [M:0;7c69a60bd8f6:42653 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:42653 2024-11-24T02:52:42,681 INFO [M:0;7c69a60bd8f6:42653 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:52:42,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:42,826 INFO [M:0;7c69a60bd8f6:42653 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:52:42,826 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:42653-0x1016ac1e38d0000, quorum=127.0.0.1:59280, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:52:42,830 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@da5059a{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:42,831 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@2220be00{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:42,831 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:42,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6a742c1f{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:42,831 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6082dc4f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:42,833 WARN [BP-759610244-172.17.0.2-1732416757589 heartbeating to localhost/127.0.0.1:36039 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:52:42,833 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:52:42,834 WARN [BP-759610244-172.17.0.2-1732416757589 heartbeating to localhost/127.0.0.1:36039 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759610244-172.17.0.2-1732416757589 (Datanode Uuid 3ea90afd-3b6d-4f43-be63-14dd5d1eb2d8) service to localhost/127.0.0.1:36039 2024-11-24T02:52:42,834 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:52:42,835 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data3/current/BP-759610244-172.17.0.2-1732416757589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:42,835 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data4/current/BP-759610244-172.17.0.2-1732416757589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:42,835 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:52:42,839 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4595827f{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:42,839 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3b01355c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:42,839 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:42,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7b58749b{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:42,840 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61783b0f{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:42,841 WARN [BP-759610244-172.17.0.2-1732416757589 heartbeating to localhost/127.0.0.1:36039 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:52:42,841 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
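Once the datanodes and the MiniZK cluster are down, the utility immediately brings up a fresh minicluster; the "Starting up minicluster with option: StartMiniClusterOption{numMasters=1, ..., numDataNodes=2, ...}" line that follows records the options used. A sketch of how a test typically builds such an option, assuming the StartMiniClusterOption builder API; the counts mirror the logged option, everything else is illustrative:

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.apache.hadoop.hbase.StartMiniClusterOption;

    public class RestartMiniClusterExample {
      public static void main(String[] args) throws Exception {
        HBaseTestingUtil util = new HBaseTestingUtil();
        StartMiniClusterOption option = StartMiniClusterOption.builder()
            .numMasters(1)
            .numRegionServers(1)
            .numDataNodes(2)
            .build();
        util.startMiniCluster(option); // starts ZK, a mini DFS and HBase, as the log shows
        try {
          // test body would run against the fresh cluster here
        } finally {
          util.shutdownMiniCluster();
        }
      }
    }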
2024-11-24T02:52:42,841 WARN [BP-759610244-172.17.0.2-1732416757589 heartbeating to localhost/127.0.0.1:36039 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-759610244-172.17.0.2-1732416757589 (Datanode Uuid f55d45e7-1498-45d9-aa8a-85924ce6d1f8) service to localhost/127.0.0.1:36039 2024-11-24T02:52:42,841 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:52:42,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data1/current/BP-759610244-172.17.0.2-1732416757589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:42,842 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/cluster_eea24812-20b9-982f-5a3a-6f17208bb33d/data/data2/current/BP-759610244-172.17.0.2-1732416757589 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:42,842 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:52:42,847 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@55cb1221{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:52:42,848 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@542ee468{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:42,848 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:42,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@737d6115{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:42,849 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@54b8bf96{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:42,856 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:52:42,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:52:42,873 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.log.dir so I do NOT create it in target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2 2024-11-24T02:52:42,874 INFO [Time-limited test {}] 
hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/c124437b-69e6-c985-0013-ddffcb53e912/hadoop.tmp.dir so I do NOT create it in target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b, deleteOnExit=true 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/test.cache.data in system properties and HBase conf 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir in system properties and HBase conf 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:52:42,874 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:52:42,874 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:52:42,875 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:52:42,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:52:42,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:52:42,876 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:52:42,887 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:52:42,969 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:52:43,203 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:43,209 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:43,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:43,210 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:43,210 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:52:43,211 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:43,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:43,211 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:43,304 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@94a50db{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-39887-hadoop-hdfs-3_4_1-tests_jar-_-any-2132254975591216617/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:52:43,304 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:39887} 2024-11-24T02:52:43,304 INFO [Time-limited test {}] server.Server(415): Started @107045ms 2024-11-24T02:52:43,316 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:52:43,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:52:43,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:52:43,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:52:43,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testSlowSyncLogRolling 2024-11-24T02:52:43,555 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:43,559 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:43,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:43,559 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:43,559 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:52:43,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@68a89b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:43,560 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cb9bebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:43,653 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5d327fd2{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-33869-hadoop-hdfs-3_4_1-tests_jar-_-any-5941077255472604482/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:43,654 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4c80aceb{HTTP/1.1, (http/1.1)}{localhost:33869} 2024-11-24T02:52:43,654 INFO [Time-limited test {}] server.Server(415): Started @107395ms 2024-11-24T02:52:43,655 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:43,686 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:43,690 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:43,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:43,691 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:43,691 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:52:43,691 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@74fcfaad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:43,692 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f7f19bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:43,787 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@597807df{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-32825-hadoop-hdfs-3_4_1-tests_jar-_-any-539739755043732198/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:43,787 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@401bd933{HTTP/1.1, (http/1.1)}{localhost:32825} 2024-11-24T02:52:43,787 INFO [Time-limited test {}] server.Server(415): Started @107528ms 2024-11-24T02:52:43,788 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:44,806 WARN [Thread-665 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data1/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:44,806 WARN [Thread-666 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data2/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:44,825 WARN [Thread-629 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9efbde2d0702c11 with lease ID 0x2543051345156243: Processing first storage report for DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c from datanode DatanodeRegistration(127.0.0.1:40699, datanodeUuid=fb18e73b-afb9-4486-9352-c50c562c05e4, infoPort=35127, infoSecurePort=0, ipcPort=34869, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9efbde2d0702c11 with lease ID 0x2543051345156243: from storage DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c node DatanodeRegistration(127.0.0.1:40699, datanodeUuid=fb18e73b-afb9-4486-9352-c50c562c05e4, infoPort=35127, infoSecurePort=0, ipcPort=34869, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xd9efbde2d0702c11 with lease ID 0x2543051345156243: Processing first storage report for DS-1eca9f70-8fc9-4b2d-bc52-c6cfc40ee41e from datanode DatanodeRegistration(127.0.0.1:40699, datanodeUuid=fb18e73b-afb9-4486-9352-c50c562c05e4, infoPort=35127, infoSecurePort=0, ipcPort=34869, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:44,828 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd9efbde2d0702c11 with lease ID 0x2543051345156243: from storage DS-1eca9f70-8fc9-4b2d-bc52-c6cfc40ee41e node DatanodeRegistration(127.0.0.1:40699, datanodeUuid=fb18e73b-afb9-4486-9352-c50c562c05e4, infoPort=35127, infoSecurePort=0, ipcPort=34869, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:52:44,941 WARN [Thread-677 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data4/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:44,941 WARN [Thread-676 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data3/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:44,960 WARN [Thread-652 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5eab74a93adfd3a5 with lease ID 0x2543051345156244: Processing first storage report for DS-15870cd1-c887-4843-8d12-c21d96ed70ee from datanode DatanodeRegistration(127.0.0.1:36945, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=37517, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5eab74a93adfd3a5 with lease ID 0x2543051345156244: from storage DS-15870cd1-c887-4843-8d12-c21d96ed70ee node DatanodeRegistration(127.0.0.1:36945, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=37517, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:52:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x5eab74a93adfd3a5 with lease ID 0x2543051345156244: Processing first storage report for DS-1e275f96-c8ef-4113-ad22-9483b2a54541 from datanode DatanodeRegistration(127.0.0.1:36945, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=37517, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:44,963 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x5eab74a93adfd3a5 with lease ID 0x2543051345156244: from storage DS-1e275f96-c8ef-4113-ad22-9483b2a54541 node DatanodeRegistration(127.0.0.1:36945, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=37517, infoSecurePort=0, ipcPort=41263, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:45,031 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2 2024-11-24T02:52:45,037 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/zookeeper_0, clientPort=59188, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:52:45,038 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59188 2024-11-24T02:52:45,039 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,041 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,052 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:52:45,053 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:52:45,054 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad with version=8 2024-11-24T02:52:45,054 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:52:45,056 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:52:45,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:45,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:45,056 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:52:45,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:45,056 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:52:45,056 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:52:45,057 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:52:45,057 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35999 2024-11-24T02:52:45,059 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:35999 connecting to ZooKeeper ensemble=127.0.0.1:59188 2024-11-24T02:52:45,107 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:359990x0, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:52:45,108 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:35999-0x1016ac1f8160000 connected 2024-11-24T02:52:45,188 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,190 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,192 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:45,192 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad, hbase.cluster.distributed=false 2024-11-24T02:52:45,194 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:52:45,194 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35999 2024-11-24T02:52:45,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35999 2024-11-24T02:52:45,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35999 2024-11-24T02:52:45,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35999 2024-11-24T02:52:45,195 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35999 2024-11-24T02:52:45,209 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:52:45,210 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:52:45,211 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34067 2024-11-24T02:52:45,212 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34067 connecting to ZooKeeper ensemble=127.0.0.1:59188 2024-11-24T02:52:45,213 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,214 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,230 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:340670x0, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:52:45,231 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:340670x0, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:52:45,231 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34067-0x1016ac1f8160001 connected 2024-11-24T02:52:45,231 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:52:45,232 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:52:45,233 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:52:45,234 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:52:45,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34067 2024-11-24T02:52:45,234 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34067 2024-11-24T02:52:45,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34067 2024-11-24T02:52:45,235 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34067 2024-11-24T02:52:45,236 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34067 2024-11-24T02:52:45,252 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:35999 2024-11-24T02:52:45,252 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:45,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:45,262 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:45,262 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:52:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:45,272 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:45,273 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:52:45,274 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,35999,1732416765056 from backup master directory 2024-11-24T02:52:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:45,283 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
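The entries above trace the standard HBaseTestingUtil bring-up for a run like this: an embedded HDFS with two datanodes, a MiniZooKeeperCluster on an ephemeral client port (59188 here), then a master (port 35999) and a region server (port 34067) that register themselves under the /hbase znode. A minimal, hedged sketch of how a test typically drives that sequence, assuming the HBaseTestingUtil API from the current HBase master branch (older releases use HBaseTestingUtility, and method names may differ):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseTestingUtil;

public class MiniClusterSketch {
  public static void main(String[] args) throws Exception {
    HBaseTestingUtil util = new HBaseTestingUtil();

    // The log above shows the harness running with hbase.cluster.distributed=false.
    Configuration conf = util.getConfiguration();
    conf.setBoolean("hbase.cluster.distributed", false);

    // Starts embedded HDFS, a MiniZooKeeperCluster, one master and one region server --
    // roughly the sequence logged between 02:52:43 and 02:52:46 above.
    util.startMiniCluster();
    try {
      // ... exercise the cluster via util.getConnection() / util.getAdmin() ...
    } finally {
      util.shutdownMiniCluster();
    }
  }
}

The actual test setup likely configures more than this (for example WAL rolling thresholds), so treat the snippet only as an outline of the startup path visible in the log.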
2024-11-24T02:52:45,283 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:52:45,283 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:45,288 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/hbase.id] with ID: ce5ef9df-4b51-472d-be5c-4ede92b2dc44 2024-11-24T02:52:45,288 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/.tmp/hbase.id 2024-11-24T02:52:45,294 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:52:45,295 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:52:45,296 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/.tmp/hbase.id]:[hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/hbase.id] 2024-11-24T02:52:45,312 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:45,312 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T02:52:45,313 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
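The cluster ID bootstrap logged above follows the usual HDFS publish pattern: write the file under a temporary path (.tmp/hbase.id), then rename it to its final name so readers never observe a half-written file. A hedged sketch of that pattern with the plain Hadoop FileSystem API; the paths and the UUID payload are illustrative, and this is not the code FSUtils itself runs:

import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ClusterIdWriteSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Root directory passed in by the caller; the run above used an hdfs://localhost:46305/... path.
    Path rootDir = new Path(args[0]);
    FileSystem fs = rootDir.getFileSystem(conf);

    Path tmp = new Path(rootDir, ".tmp/hbase.id");
    Path dst = new Path(rootDir, "hbase.id");

    // 1. Write the ID to a temporary file first.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(UUID.randomUUID().toString().getBytes(StandardCharsets.UTF_8));
    }
    // 2. Rename it into place; on HDFS a same-filesystem rename is atomic for readers.
    if (!fs.rename(tmp, dst)) {
      throw new IOException("rename failed: " + tmp + " -> " + dst);
    }
  }
}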
2024-11-24T02:52:45,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:45,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:45,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:52:45,332 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:52:45,333 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:52:45,334 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:52:45,334 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:45,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:52:45,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:52:45,752 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store 2024-11-24T02:52:45,762 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:52:45,763 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:52:45,763 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:45,763 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:52:45,764 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:45,764 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:45,764 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:52:45,764 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:52:45,764 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
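The master:store descriptor dumped above maps onto HBase's public descriptor builder API. As a rough illustration of the 'info' family attributes shown in the log (three versions, ROW_INDEX_V1 encoding, ROWCOL bloom filter, in-memory, 8 KB blocks), and not the code MasterRegion actually uses to build this table, the same shape could be expressed as:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed in the log:
    // VERSIONS=3, ROW_INDEX_V1 encoding, ROWCOL bloom, in-memory, 8 KB blocks.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .build();

    return TableDescriptorBuilder.newBuilder(TableName.valueOf("master:store"))
        .setColumnFamily(info)
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("proc"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("rs"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("state"))
        .build();
  }
}

The proc, rs and state families in the log carry only default attributes (one version, no encoding, ROW bloom, 64 KB blocks), which is why ColumnFamilyDescriptorBuilder.of is enough for them in this sketch.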
2024-11-24T02:52:45,764 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416765763Disabling compacts and flushes for region at 1732416765763Disabling writes for close at 1732416765764 (+1 ms)Writing region close event to WAL at 1732416765764Closed at 1732416765764 2024-11-24T02:52:45,765 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/.initializing 2024-11-24T02:52:45,765 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:45,768 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C35999%2C1732416765056, suffix=, logDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056, archiveDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/oldWALs, maxLogs=10 2024-11-24T02:52:45,768 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 2024-11-24T02:52:45,774 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 2024-11-24T02:52:45,775 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35127:35127),(127.0.0.1/127.0.0.1:37517:37517)] 2024-11-24T02:52:45,776 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:52:45,776 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:45,776 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,776 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,779 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,780 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:52:45,780 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:45,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:45,781 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:52:45,783 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:45,783 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:45,784 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,785 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:52:45,785 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:45,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:45,786 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,787 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:52:45,788 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:45,788 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:45,788 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,789 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,789 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,791 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,791 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:52:45,793 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:52:45,795 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:52:45,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=829933, jitterRate=0.05531442165374756}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:52:45,797 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416765777Initializing all the Stores at 1732416765778 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416765778Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416765778Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416765778Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416765779 (+1 ms)Cleaning up temporary data from old regions at 1732416765791 (+12 ms)Region opened successfully at 1732416765797 (+6 ms) 2024-11-24T02:52:45,797 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:52:45,801 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@33e3aae7, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:52:45,802 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:52:45,802 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:52:45,802 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:52:45,802 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:52:45,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T02:52:45,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T02:52:45,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:52:45,806 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T02:52:45,807 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:52:45,892 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:52:45,892 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:52:45,893 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:52:45,955 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:52:45,955 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:52:45,957 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:52:45,967 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:52:45,968 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:52:45,977 DEBUG 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:52:45,979 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:52:45,988 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:52:45,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:45,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:52:45,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:45,999 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:45,999 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,35999,1732416765056, sessionid=0x1016ac1f8160000, setting cluster-up flag (Was=false) 2024-11-24T02:52:46,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:46,020 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:46,051 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:52:46,053 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:46,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:46,072 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:46,104 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:52:46,108 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:46,111 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:52:46,115 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:46,116 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:52:46,116 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T02:52:46,117 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,35999,1732416765056 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:52:46,120 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:46,120 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:46,120 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:46,120 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:52:46,120 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:52:46,121 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,121 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:52:46,121 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T02:52:46,121 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416796121 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,122 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:52:46,123 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:52:46,123 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:46,123 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:52:46,123 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:52:46,123 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:52:46,123 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:52:46,123 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416766123,5,FailOnTimeoutGroup] 2024-11-24T02:52:46,123 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416766123,5,FailOnTimeoutGroup] 2024-11-24T02:52:46,123 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,124 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:52:46,124 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,124 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,124 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,124 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:52:46,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:52:46,131 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:52:46,132 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:52:46,132 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad 2024-11-24T02:52:46,138 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:52:46,139 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(746): ClusterId : ce5ef9df-4b51-472d-be5c-4ede92b2dc44 2024-11-24T02:52:46,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:52:46,139 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:52:46,139 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:46,140 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:52:46,142 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:52:46,142 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, 
encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,142 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:52:46,143 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:52:46,143 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,144 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:52:46,145 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:52:46,145 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,146 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:52:46,147 DEBUG [RS:0;7c69a60bd8f6:34067 {}] 
procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:52:46,147 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:52:46,147 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:52:46,147 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,148 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,148 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:52:46,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740 2024-11-24T02:52:46,149 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740 2024-11-24T02:52:46,150 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:52:46,151 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:52:46,151 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T02:52:46,153 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:52:46,156 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:52:46,156 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=732073, jitterRate=-0.06912168860435486}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:52:46,157 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416766139Initializing all the Stores at 1732416766140 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416766140Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416766140Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416766140Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416766140Cleaning up temporary data from old regions at 1732416766151 (+11 ms)Region opened successfully at 1732416766157 (+6 ms) 2024-11-24T02:52:46,157 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:52:46,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:52:46,158 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:52:46,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:52:46,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:52:46,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:52:46,158 DEBUG [RS:0;7c69a60bd8f6:34067 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@30890fb0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:52:46,158 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:52:46,158 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416766158Disabling compacts and flushes for region at 1732416766158Disabling writes for close at 1732416766158Writing region close event to WAL at 1732416766158Closed at 1732416766158 2024-11-24T02:52:46,160 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:46,160 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:52:46,160 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:52:46,161 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:52:46,162 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:52:46,171 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:34067 2024-11-24T02:52:46,171 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:52:46,171 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:52:46,171 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T02:52:46,172 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,35999,1732416765056 with port=34067, startcode=1732416765209 2024-11-24T02:52:46,172 DEBUG [RS:0;7c69a60bd8f6:34067 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:52:46,174 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48777, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.2 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:52:46,174 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35999 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,175 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35999 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,176 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad 2024-11-24T02:52:46,177 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46305 2024-11-24T02:52:46,177 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:52:46,287 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:52:46,287 DEBUG [RS:0;7c69a60bd8f6:34067 {}] zookeeper.ZKUtil(111): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,287 WARN [RS:0;7c69a60bd8f6:34067 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:52:46,287 INFO [RS:0;7c69a60bd8f6:34067 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:46,288 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,288 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,34067,1732416765209] 2024-11-24T02:52:46,291 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:52:46,293 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:52:46,294 INFO [RS:0;7c69a60bd8f6:34067 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:52:46,294 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:46,294 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:52:46,295 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:52:46,295 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,295 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,295 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,295 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,295 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,295 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:52:46,296 DEBUG [RS:0;7c69a60bd8f6:34067 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:52:46,296 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:46,296 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,297 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,297 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,297 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,297 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34067,1732416765209-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:52:46,310 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:52:46,310 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34067,1732416765209-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,310 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,310 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.Replication(171): 7c69a60bd8f6,34067,1732416765209 started 2024-11-24T02:52:46,313 WARN [7c69a60bd8f6:35999 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T02:52:46,324 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:46,325 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,34067,1732416765209, RpcServer on 7c69a60bd8f6/172.17.0.2:34067, sessionid=0x1016ac1f8160001 2024-11-24T02:52:46,325 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:52:46,325 DEBUG [RS:0;7c69a60bd8f6:34067 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,325 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34067,1732416765209' 2024-11-24T02:52:46,325 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:52:46,326 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:52:46,326 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:52:46,326 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:52:46,326 DEBUG [RS:0;7c69a60bd8f6:34067 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,326 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34067,1732416765209' 2024-11-24T02:52:46,326 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:52:46,327 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:52:46,327 DEBUG [RS:0;7c69a60bd8f6:34067 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:52:46,327 INFO [RS:0;7c69a60bd8f6:34067 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:52:46,327 INFO [RS:0;7c69a60bd8f6:34067 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-24T02:52:46,431 INFO [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34067%2C1732416765209, suffix=, logDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209, archiveDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs, maxLogs=32 2024-11-24T02:52:46,434 INFO [RS:0;7c69a60bd8f6:34067 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 2024-11-24T02:52:46,442 INFO [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 2024-11-24T02:52:46,443 DEBUG [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37517:37517),(127.0.0.1/127.0.0.1:35127:35127)] 2024-11-24T02:52:46,563 DEBUG [7c69a60bd8f6:35999 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:52:46,564 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,567 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,34067,1732416765209, state=OPENING 2024-11-24T02:52:46,578 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:52:46,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:46,589 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:52:46,590 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:52:46,590 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:46,590 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:46,590 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34067,1732416765209}] 2024-11-24T02:52:46,748 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:52:46,754 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:48055, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:52:46,761 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] 
handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:52:46,761 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:46,764 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34067%2C1732416765209.meta, suffix=.meta, logDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209, archiveDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs, maxLogs=32 2024-11-24T02:52:46,765 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta 2024-11-24T02:52:46,773 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta 2024-11-24T02:52:46,774 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35127:35127),(127.0.0.1/127.0.0.1:37517:37517)] 2024-11-24T02:52:46,776 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:52:46,776 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:52:46,777 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:52:46,777 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
2024-11-24T02:52:46,777 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:52:46,777 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:46,777 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:52:46,777 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:52:46,780 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:52:46,781 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:52:46,782 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,782 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,783 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:52:46,784 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:52:46,784 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,785 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:52:46,787 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:52:46,787 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:52:46,788 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:52:46,789 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:52:46,789 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:46,789 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
2024-11-24T02:52:46,789 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:52:46,790 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740 2024-11-24T02:52:46,791 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740 2024-11-24T02:52:46,793 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:52:46,793 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:52:46,793 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:52:46,795 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:52:46,796 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=772919, jitterRate=-0.017183929681777954}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:52:46,796 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:52:46,797 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416766777Writing region info on filesystem at 1732416766777Initializing all the Stores at 1732416766779 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416766779Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416766780 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', 
COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416766780Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416766780Cleaning up temporary data from old regions at 1732416766793 (+13 ms)Running coprocessor post-open hooks at 1732416766796 (+3 ms)Region opened successfully at 1732416766797 (+1 ms) 2024-11-24T02:52:46,798 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416766747 2024-11-24T02:52:46,801 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:52:46,801 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:52:46,802 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,803 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,34067,1732416765209, state=OPEN 2024-11-24T02:52:46,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:52:46,836 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:52:46,836 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:46,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:46,836 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:52:46,839 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:52:46,840 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34067,1732416765209 in 246 msec 2024-11-24T02:52:46,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:52:46,842 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 679 msec 2024-11-24T02:52:46,843 DEBUG [PEWorker-2 {}] 
procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:52:46,843 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:52:46,845 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:52:46,845 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,34067,1732416765209, seqNum=-1] 2024-11-24T02:52:46,845 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:52:46,846 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:33965, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:52:46,852 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 738 msec 2024-11-24T02:52:46,853 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416766853, completionTime=-1 2024-11-24T02:52:46,853 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:52:46,853 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416826855 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732416886855 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35999,1732416765056-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35999,1732416765056-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,855 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35999,1732416765056-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,856 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:35999, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:46,856 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,856 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:46,858 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.577sec 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35999,1732416765056-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:52:46,860 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35999,1732416765056-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:52:46,862 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:52:46,863 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:52:46,863 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35999,1732416765056-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:46,940 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@69b06183, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:52:46,941 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,35999,-1 for getting cluster id 2024-11-24T02:52:46,941 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:52:46,945 DEBUG [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'ce5ef9df-4b51-472d-be5c-4ede92b2dc44' 2024-11-24T02:52:46,946 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:52:46,947 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "ce5ef9df-4b51-472d-be5c-4ede92b2dc44" 2024-11-24T02:52:46,948 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@170e53ab, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:52:46,948 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,35999,-1] 2024-11-24T02:52:46,948 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:52:46,948 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:52:46,950 INFO [HMaster-EventLoopGroup-7-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39882, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:52:46,951 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4bb75251, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:52:46,951 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:52:46,952 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,34067,1732416765209, seqNum=-1] 2024-11-24T02:52:46,952 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:52:46,954 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-8-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60038, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:52:46,956 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:46,956 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:46,960 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:52:46,983 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:52:46,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:46,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:46,983 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:52:46,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:52:46,983 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:52:46,983 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:52:46,984 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:52:46,984 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:44053 2024-11-24T02:52:46,986 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:44053 connecting to ZooKeeper ensemble=127.0.0.1:59188 2024-11-24T02:52:46,987 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:46,990 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:52:47,041 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:440530x0, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:52:47,042 DEBUG [Time-limited test {}] zookeeper.ZKUtil(111): regionserver:440530x0, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on existing znode=/hbase/running 2024-11-24T02:52:47,042 DEBUG [pool-381-thread-1 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: INIT 2024-11-24T02:52:47,043 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:52:47,044 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:44053-0x1016ac1f8160002 connected 2024-11-24T02:52:47,049 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:52:47,050 DEBUG 
[Time-limited test {}] zookeeper.ZKUtil(111): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:52:47,052 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:52:47,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=44053 2024-11-24T02:52:47,053 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=44053 2024-11-24T02:52:47,060 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=44053 2024-11-24T02:52:47,064 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=44053 2024-11-24T02:52:47,067 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=44053 2024-11-24T02:52:47,071 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(746): ClusterId : ce5ef9df-4b51-472d-be5c-4ede92b2dc44 2024-11-24T02:52:47,071 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:52:47,084 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:52:47,084 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:52:47,094 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:52:47,095 DEBUG [RS:1;7c69a60bd8f6:44053 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66c7d5d9, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:52:47,112 DEBUG [RS:1;7c69a60bd8f6:44053 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:1;7c69a60bd8f6:44053 2024-11-24T02:52:47,112 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:52:47,112 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:52:47,112 DEBUG [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(832): About to register with Master. 
2024-11-24T02:52:47,113 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,35999,1732416765056 with port=44053, startcode=1732416766982 2024-11-24T02:52:47,113 DEBUG [RS:1;7c69a60bd8f6:44053 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:52:47,115 INFO [HMaster-EventLoopGroup-7-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:43373, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.3 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:52:47,116 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35999 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,44053,1732416766982 2024-11-24T02:52:47,116 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=35999 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,44053,1732416766982 2024-11-24T02:52:47,117 DEBUG [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad 2024-11-24T02:52:47,118 DEBUG [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46305 2024-11-24T02:52:47,118 DEBUG [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:52:47,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:52:47,125 DEBUG [RS:1;7c69a60bd8f6:44053 {}] zookeeper.ZKUtil(111): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,44053,1732416766982 2024-11-24T02:52:47,126 WARN [RS:1;7c69a60bd8f6:44053 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:52:47,126 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,44053,1732416766982] 2024-11-24T02:52:47,126 INFO [RS:1;7c69a60bd8f6:44053 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:52:47,126 DEBUG [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982 2024-11-24T02:52:47,129 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:52:47,131 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:52:47,131 INFO [RS:1;7c69a60bd8f6:44053 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:52:47,131 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:47,131 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:52:47,132 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:52:47,132 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,132 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,132 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,132 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,132 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:52:47,133 DEBUG [RS:1;7c69a60bd8f6:44053 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:52:47,133 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:47,133 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,134 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,134 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,134 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,134 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,44053,1732416766982-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:52:47,136 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:52:47,139 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:47,154 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:52:47,154 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,44053,1732416766982-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,154 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:52:47,154 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.Replication(171): 7c69a60bd8f6,44053,1732416766982 started 2024-11-24T02:52:47,160 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:47,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:47,163 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:47,174 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 
2024-11-24T02:52:47,175 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,44053,1732416766982, RpcServer on 7c69a60bd8f6/172.17.0.2:44053, sessionid=0x1016ac1f8160002 2024-11-24T02:52:47,175 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:52:47,175 DEBUG [RS:1;7c69a60bd8f6:44053 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,44053,1732416766982 2024-11-24T02:52:47,175 INFO [Time-limited test {}] hbase.HBaseTestingUtil(2882): Started new server=Thread[RS:1;7c69a60bd8f6:44053,5,FailOnTimeoutGroup] 2024-11-24T02:52:47,175 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,44053,1732416766982' 2024-11-24T02:52:47,175 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:52:47,175 INFO [Time-limited test {}] wal.TestLogRolling(207): Replication=2 2024-11-24T02:52:47,176 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:52:47,176 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T02:52:47,176 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:52:47,176 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:52:47,176 DEBUG [RS:1;7c69a60bd8f6:44053 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,44053,1732416766982 2024-11-24T02:52:47,176 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,44053,1732416766982' 2024-11-24T02:52:47,176 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:52:47,177 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:52:47,177 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.AsyncConnectionImpl(321): The fetched master address is 7c69a60bd8f6,35999,1732416765056 2024-11-24T02:52:47,177 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@445e0c01 2024-11-24T02:52:47,177 DEBUG [RS:1;7c69a60bd8f6:44053 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:52:47,177 INFO [RS:1;7c69a60bd8f6:44053 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:52:47,177 INFO [RS:1;7c69a60bd8f6:44053 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 
2024-11-24T02:52:47,177 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T02:52:47,179 INFO [HMaster-EventLoopGroup-7-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39886, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T02:52:47,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T02:52:47,180 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T02:52:47,180 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnDatanodeDeath', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:52:47,182 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T02:52:47,183 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T02:52:47,183 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:47,183 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnDatanodeDeath" procId is: 4 2024-11-24T02:52:47,185 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:52:47,185 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T02:52:47,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741835_1011 (size=393) 2024-11-24T02:52:47,197 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741835_1011 (size=393) 2024-11-24T02:52:47,199 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 7aa7cd2006b90d267df7d090f44cfe26, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.', STARTKEY => '', ENDKEY => ''}, 
tableDescriptor='TestLogRolling-testLogRollOnDatanodeDeath', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad 2024-11-24T02:52:47,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36945 is added to blk_1073741836_1012 (size=76) 2024-11-24T02:52:47,206 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40699 is added to blk_1073741836_1012 (size=76) 2024-11-24T02:52:47,207 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:47,207 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1722): Closing 7aa7cd2006b90d267df7d090f44cfe26, disabling compactions & flushes 2024-11-24T02:52:47,207 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:52:47,207 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:52:47,207 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. after waiting 0 ms 2024-11-24T02:52:47,207 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:52:47,207 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 
2024-11-24T02:52:47,207 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnDatanodeDeath-pool-0 {}] regionserver.HRegion(1676): Region close journal for 7aa7cd2006b90d267df7d090f44cfe26: Waiting for close lock at 1732416767207Disabling compacts and flushes for region at 1732416767207Disabling writes for close at 1732416767207Writing region close event to WAL at 1732416767207Closed at 1732416767207 2024-11-24T02:52:47,209 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T02:52:47,209 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.","families":{"info":[{"qualifier":"regioninfo","vlen":75,"tag":[],"timestamp":"1732416767209"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416767209"}]},"ts":"1732416767209"} 2024-11-24T02:52:47,212 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T02:52:47,214 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T02:52:47,214 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416767214"}]},"ts":"1732416767214"} 2024-11-24T02:52:47,217 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLING in hbase:meta 2024-11-24T02:52:47,217 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7aa7cd2006b90d267df7d090f44cfe26, ASSIGN}] 2024-11-24T02:52:47,219 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7aa7cd2006b90d267df7d090f44cfe26, ASSIGN 2024-11-24T02:52:47,220 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7aa7cd2006b90d267df7d090f44cfe26, ASSIGN; state=OFFLINE, location=7c69a60bd8f6,34067,1732416765209; forceNewPlan=false, retain=false 2024-11-24T02:52:47,280 INFO [RS:1;7c69a60bd8f6:44053 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C44053%2C1732416766982, suffix=, logDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982, archiveDir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs, maxLogs=32 2024-11-24T02:52:47,281 INFO [RS:1;7c69a60bd8f6:44053 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 2024-11-24T02:52:47,287 INFO [RS:1;7c69a60bd8f6:44053 {}] wal.AbstractFSWAL(991): New 
WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 2024-11-24T02:52:47,288 DEBUG [RS:1;7c69a60bd8f6:44053 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:35127:35127),(127.0.0.1/127.0.0.1:37517:37517)] 2024-11-24T02:52:47,371 INFO [7c69a60bd8f6:35999 {}] balancer.BaseLoadBalancer(388): Reassigned 1 regions. 1 retained the pre-restart assignment. 2024-11-24T02:52:47,372 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7aa7cd2006b90d267df7d090f44cfe26, regionState=OPENING, regionLocation=7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:47,376 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7aa7cd2006b90d267df7d090f44cfe26, ASSIGN because future has completed 2024-11-24T02:52:47,377 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7aa7cd2006b90d267df7d090f44cfe26, server=7c69a60bd8f6,34067,1732416765209}] 2024-11-24T02:52:47,546 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:52:47,546 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 7aa7cd2006b90d267df7d090f44cfe26, NAME => 'TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:52:47,547 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnDatanodeDeath 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,547 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:52:47,547 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,547 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,549 INFO [StoreOpener-7aa7cd2006b90d267df7d090f44cfe26-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,551 INFO [StoreOpener-7aa7cd2006b90d267df7d090f44cfe26-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 7aa7cd2006b90d267df7d090f44cfe26 columnFamilyName info 2024-11-24T02:52:47,551 DEBUG [StoreOpener-7aa7cd2006b90d267df7d090f44cfe26-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:52:47,552 INFO [StoreOpener-7aa7cd2006b90d267df7d090f44cfe26-1 {}] regionserver.HStore(327): Store=7aa7cd2006b90d267df7d090f44cfe26/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:52:47,552 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,553 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,554 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,555 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,555 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,556 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,559 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:52:47,559 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 7aa7cd2006b90d267df7d090f44cfe26; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=770706, jitterRate=-0.019996851682662964}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:52:47,559 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): 
Running coprocessor post-open hooks for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:52:47,560 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 7aa7cd2006b90d267df7d090f44cfe26: Running coprocessor pre-open hook at 1732416767547Writing region info on filesystem at 1732416767547Initializing all the Stores at 1732416767549 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416767549Cleaning up temporary data from old regions at 1732416767555 (+6 ms)Running coprocessor post-open hooks at 1732416767559 (+4 ms)Region opened successfully at 1732416767560 (+1 ms) 2024-11-24T02:52:47,561 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26., pid=6, masterSystemTime=1732416767534 2024-11-24T02:52:47,564 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:52:47,564 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 
2024-11-24T02:52:47,565 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=7aa7cd2006b90d267df7d090f44cfe26, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,34067,1732416765209 2024-11-24T02:52:47,568 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-8-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 7aa7cd2006b90d267df7d090f44cfe26, server=7c69a60bd8f6,34067,1732416765209 because future has completed 2024-11-24T02:52:47,572 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T02:52:47,573 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 7aa7cd2006b90d267df7d090f44cfe26, server=7c69a60bd8f6,34067,1732416765209 in 192 msec 2024-11-24T02:52:47,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T02:52:47,575 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnDatanodeDeath, region=7aa7cd2006b90d267df7d090f44cfe26, ASSIGN in 356 msec 2024-11-24T02:52:47,576 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T02:52:47,576 DEBUG [PEWorker-2 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnDatanodeDeath","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416767576"}]},"ts":"1732416767576"} 2024-11-24T02:52:47,579 INFO [PEWorker-2 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnDatanodeDeath, state=ENABLED in hbase:meta 2024-11-24T02:52:47,580 INFO [PEWorker-2 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T02:52:47,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnDatanodeDeath in 400 msec 2024-11-24T02:52:52,280 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:52:52,282 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:52,301 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:52,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:52,303 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:52:52,310 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnDatanodeDeath' 2024-11-24T02:52:53,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:52:53,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T02:52:53,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T02:52:53,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath Metrics about Tables on a single HBase RegionServer 2024-11-24T02:52:53,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:52:53,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T02:52:53,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:52:53,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T02:52:57,281 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=35999 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:52:57,282 INFO [RPCClient-NioEventLoopGroup-4-11 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnDatanodeDeath completed 2024-11-24T02:52:57,282 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnDatanodeDeath,, stopping at row=TestLogRolling-testLogRollOnDatanodeDeath ,, for max=2147483647 with caching=100 2024-11-24T02:52:57,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T02:52:57,286 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:52:57,301 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:57,305 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:57,305 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:57,305 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:57,306 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:52:57,306 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2af8c71d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:57,307 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3ad9bbfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:57,400 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@78e445ac{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-37581-hadoop-hdfs-3_4_1-tests_jar-_-any-5997065984499877682/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:57,400 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5c0b8b07{HTTP/1.1, (http/1.1)}{localhost:37581} 2024-11-24T02:52:57,400 INFO [Time-limited test {}] server.Server(415): Started @121142ms 2024-11-24T02:52:57,402 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:57,430 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:57,433 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:57,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:57,434 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:57,434 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:52:57,434 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@12827689{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:57,435 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@23038dc2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:57,528 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@3ad4ed7c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-37693-hadoop-hdfs-3_4_1-tests_jar-_-any-13969884557527592641/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:57,528 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@756ea16d{HTTP/1.1, (http/1.1)}{localhost:37693} 2024-11-24T02:52:57,528 INFO [Time-limited test {}] server.Server(415): Started @121269ms 2024-11-24T02:52:57,529 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:57,562 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:52:57,564 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:52:57,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:52:57,565 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:52:57,565 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:52:57,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@69248046{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:52:57,566 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7524e7e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:52:57,663 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7eb01e24{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-41213-hadoop-hdfs-3_4_1-tests_jar-_-any-12406338049327381835/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:57,664 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4949cd53{HTTP/1.1, (http/1.1)}{localhost:41213} 2024-11-24T02:52:57,664 INFO [Time-limited test {}] server.Server(415): Started @121405ms 2024-11-24T02:52:57,665 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:52:58,908 WARN [Thread-861 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data5/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:58,908 WARN [Thread-862 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data6/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:58,924 WARN [Thread-802 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:58,926 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd4ebc3f3a626363 with lease ID 0x2543051345156245: Processing first storage report for DS-15975a09-2384-4fe6-aac0-e8ee50f26af6 from datanode DatanodeRegistration(127.0.0.1:34139, datanodeUuid=db426a4e-f47e-441e-82e9-4eb48c4c0a2d, infoPort=35737, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:58,926 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd4ebc3f3a626363 with lease ID 0x2543051345156245: from storage DS-15975a09-2384-4fe6-aac0-e8ee50f26af6 node DatanodeRegistration(127.0.0.1:34139, datanodeUuid=db426a4e-f47e-441e-82e9-4eb48c4c0a2d, infoPort=35737, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:58,926 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdd4ebc3f3a626363 with lease ID 0x2543051345156245: Processing first storage report for DS-673871c4-b9bb-4d03-b621-035302d20725 from datanode DatanodeRegistration(127.0.0.1:34139, datanodeUuid=db426a4e-f47e-441e-82e9-4eb48c4c0a2d, infoPort=35737, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:58,927 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdd4ebc3f3a626363 with lease ID 0x2543051345156245: from storage DS-673871c4-b9bb-4d03-b621-035302d20725 node DatanodeRegistration(127.0.0.1:34139, datanodeUuid=db426a4e-f47e-441e-82e9-4eb48c4c0a2d, infoPort=35737, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:58,958 WARN [Thread-872 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data7/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:58,958 WARN [Thread-873 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data8/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:58,984 WARN [Thread-824 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:58,986 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb56a431d38bb5c7 with lease ID 0x2543051345156246: Processing first storage report for DS-046b3858-d28d-4940-862c-dde121ce5de8 from datanode DatanodeRegistration(127.0.0.1:39763, datanodeUuid=ece4a389-2fb7-441a-a2b6-dfce68549f3f, infoPort=37991, infoSecurePort=0, ipcPort=33963, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:58,986 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb56a431d38bb5c7 with lease ID 0x2543051345156246: from storage DS-046b3858-d28d-4940-862c-dde121ce5de8 node DatanodeRegistration(127.0.0.1:39763, datanodeUuid=ece4a389-2fb7-441a-a2b6-dfce68549f3f, infoPort=37991, infoSecurePort=0, ipcPort=33963, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:58,986 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xdb56a431d38bb5c7 with lease ID 0x2543051345156246: Processing first storage report for DS-85cdb1cc-0e38-452f-9f86-198430b6b913 from datanode DatanodeRegistration(127.0.0.1:39763, datanodeUuid=ece4a389-2fb7-441a-a2b6-dfce68549f3f, infoPort=37991, infoSecurePort=0, ipcPort=33963, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:58,986 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xdb56a431d38bb5c7 with lease ID 0x2543051345156246: from storage DS-85cdb1cc-0e38-452f-9f86-198430b6b913 node DatanodeRegistration(127.0.0.1:39763, datanodeUuid=ece4a389-2fb7-441a-a2b6-dfce68549f3f, infoPort=37991, infoSecurePort=0, ipcPort=33963, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:59,020 WARN [Thread-883 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:59,020 WARN [Thread-884 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10/current/BP-450919650-172.17.0.2-1732416762899/current, will proceed with Du for space computation calculation, 2024-11-24T02:52:59,034 WARN [Thread-846 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:52:59,037 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3654b8915606156 with lease ID 0x2543051345156247: Processing first storage report for DS-ab05df6c-0fbb-40ba-b789-460231ac12e0 from datanode DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:59,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3654b8915606156 with lease ID 0x2543051345156247: from storage DS-ab05df6c-0fbb-40ba-b789-460231ac12e0 node DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:52:59,037 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe3654b8915606156 with lease ID 0x2543051345156247: Processing first storage report for DS-2dcfd28a-71d6-4fe4-a996-25d5a872766c from datanode DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899) 2024-11-24T02:52:59,037 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe3654b8915606156 with lease ID 0x2543051345156247: from storage DS-2dcfd28a-71d6-4fe4-a996-25d5a872766c node DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:52:59,102 WARN [ResponseProcessor for block BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,102 WARN [ResponseProcessor for block BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010 java.io.IOException: Bad response ERROR for BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010 from datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:52:59,102 WARN [ResponseProcessor for block BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013 java.io.IOException: Bad response ERROR for BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013 from datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,102 WARN [ResponseProcessor for block BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006 java.io.IOException: Bad response ERROR for BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006 from datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,103 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta block BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:52:59,103 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 block BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:52:59,103 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 block BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 
2024-11-24T02:52:59,103 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 block BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:52:59,103 WARN [PacketResponder: BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36945] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,103 WARN [PacketResponder: BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36945] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,104 WARN [PacketResponder: BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:36945] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Connection reset by peer at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_341908246_22 at /127.0.0.1:37196 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:40699:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37196 dst: /127.0.0.1:40699 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:37170 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:40699:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37170 dst: /127.0.0.1:40699 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:58284 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:36945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58284 dst: /127.0.0.1:36945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] 
at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,105 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1221326814_22 at /127.0.0.1:37148 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:40699:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37148 dst: /127.0.0.1:40699 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,106 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_341908246_22 at /127.0.0.1:58302 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013] {}] datanode.DataXceiver(331): 127.0.0.1:36945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58302 dst: /127.0.0.1:36945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,106 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:58286 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:36945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58286 dst: /127.0.0.1:36945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,107 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-1221326814_22 at /127.0.0.1:58244 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:36945:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:58244 dst: /127.0.0.1:36945 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
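[editor's note, not part of the captured output] The WARN/ERROR entries above trace a single HDFS write-pipeline failure: the client-side ResponseProcessor sees an unexpected EOF or a bad ack, DataStreamer marks datanode 127.0.0.1:36945 as bad and rebuilds the pipeline around it, and the PacketResponder/DataXceiver threads on the other nodes log the broken connections. How aggressively a DFS client replaces a failed pipeline node is controlled by the dfs.client.block.write.replace-datanode-on-failure.* settings; the following is a minimal, hypothetical Java sketch of relaxing them for a very small cluster (whether such settings were in effect for this run is not shown in the log):

    import org.apache.hadoop.conf.Configuration;

    public class PipelineFailurePolicyExample {
        // Returns a client Configuration that tolerates losing a pipeline datanode
        // in a very small cluster instead of failing the write outright.
        public static Configuration relaxedWriteConf() {
            Configuration conf = new Configuration();
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "DEFAULT");
            conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.best-effort", true);
            return conf;
        }
    }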
2024-11-24T02:52:59,107 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:37160 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:40699:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37160 dst: /127.0.0.1:40699 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@597807df{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:59,108 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@401bd933{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:59,108 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:59,108 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f7f19bf{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:59,109 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@74fcfaad{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:59,110 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:52:59,110 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:52:59,110 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-450919650-172.17.0.2-1732416762899 (Datanode Uuid 1edc601b-a53b-40fd-945a-3baf2679ddc6) service to localhost/127.0.0.1:46305 2024-11-24T02:52:59,110 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:52:59,110 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data3/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:59,111 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data4/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:59,111 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:52:59,118 ERROR [org.apache.hadoop.hdfs.server.datanode.DataXceiver@30d47bef {}] datanode.DataXceiver(331): 127.0.0.1:40699:DataXceiver error processing unknown operation src: /127.0.0.1:40032 dst: /127.0.0.1:40699 java.io.IOException: Server closed. at org.apache.hadoop.hdfs.server.datanode.DataXceiverServer.addPeer(DataXceiverServer.java:334) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:232) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:52:59,118 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta block BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741834_1010 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
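[editor's note, not part of the captured output] The Jetty context shutdowns and "Ending block pool service" messages above correspond to a datanode being stopped underneath live writers, which is exactly what a datanode-death scenario exercises (the test name TestLogRolling-testLogRollOnDatanodeDeath appears further down). Purely as an illustration, and not the code this test actually runs, stopping and restarting a datanode in a MiniDFSCluster looks roughly like this:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hdfs.MiniDFSCluster;

    public class StopDataNodeExample {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // Three datanodes so a two-replica write pipeline can survive one death.
            MiniDFSCluster cluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();
            try {
                // Stopping a node while writers are active produces the kind of
                // EOF / "is bad" pipeline recovery logged above.
                MiniDFSCluster.DataNodeProperties stopped = cluster.stopDataNode(0);
                // ... exercise the writers here, then optionally bring it back:
                cluster.restartDataNode(stopped, true);
            } finally {
                cluster.shutdown();
            }
        }
    }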
2024-11-24T02:52:59,118 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 block BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741833_1009 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,118 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 block BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741837_1013 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,118 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 block BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741830_1006 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:118) ~[hadoop-common-3.4.1.jar:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at java.io.FilterInputStream.read(FilterInputStream.java:82) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:527) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1931) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1666) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5d327fd2{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:52:59,120 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4c80aceb{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:52:59,120 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:52:59,120 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cb9bebc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:52:59,121 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@68a89b56{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:52:59,121 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:52:59,121 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:52:59,122 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-450919650-172.17.0.2-1732416762899 (Datanode Uuid fb18e73b-afb9-4486-9352-c50c562c05e4) service to localhost/127.0.0.1:46305 2024-11-24T02:52:59,122 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:52:59,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data1/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:59,122 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data2/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:52:59,122 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:52:59,126 DEBUG [RPCClient-NioEventLoopGroup-4-10 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnDatanodeDeath', row='row0002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26., hostname=7c69a60bd8f6,34067,1732416765209, seqNum=2] 2024-11-24T02:52:59,127 ERROR [FSHLog-0-hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad-prefix:7c69a60bd8f6,34067,1732416765209 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,127 WARN [FSHLog-0-hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad-prefix:7c69a60bd8f6,34067,1732416765209 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,127 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34067%2C1732416765209:(num 1732416766433) roll requested 2024-11-24T02:52:59,127 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 2024-11-24T02:52:59,134 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,134 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:59,135 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:59,135 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:59,135 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:59,135 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:52:59,135 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 with entries=1, filesize=455 B; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 2024-11-24T02:52:59,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,135 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:52:59,136 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(47): Initialize RecoverLeaseFSUtils 2024-11-24T02:52:59,136 DEBUG [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(59): set recoverLeaseMethod to org.apache.hadoop.fs.LeaseRecoverable.recoverLease() 2024-11-24T02:52:59,137 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 2024-11-24T02:52:59,138 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:37991:37991),(127.0.0.1/127.0.0.1:35737:35737)] 2024-11-24T02:52:59,138 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 2024-11-24T02:52:59,139 WARN [IPC Server handler 1 on default port 46305 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 has not been closed. Lease recovery is in progress. RecoveryId = 1019 for block blk_1073741833_1009 2024-11-24T02:52:59,142 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 after 4ms 2024-11-24T02:52:59,336 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:01,135 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:01,139 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:01,140 INFO [Time-limited test {}] wal.TestLogRolling(261): log.getCurrentFileName(): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 2024-11-24T02:53:01,141 WARN [ResponseProcessor for block BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:01,142 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 block BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1018 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1018 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:01,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:59316 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:39763:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:59316 dst: /127.0.0.1:39763 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:01,143 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:56860 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1018] {}] datanode.DataXceiver(331): 127.0.0.1:34139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:56860 dst: /127.0.0.1:34139 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
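[editor's note, not part of the captured output] The Close-WAL-Writer entries above show RecoverLeaseFSUtils asking the NameNode to recover the lease on the abandoned WAL; the NameNode replies that lease recovery is still in progress, and a second attempt follows roughly four seconds later (visible further down in this log). A minimal sketch of the same HDFS lease-recovery loop, with an assumed, illustrative retry interval rather than anything taken from HBase:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoveryExample {
        // Ask the NameNode to recover the lease on an abandoned file and wait
        // until it is closed, retrying in a loop much like the log above shows.
        public static void recoverLease(Configuration conf, Path file) throws Exception {
            DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(file.toUri(), conf);
            boolean recovered = dfs.recoverLease(file);
            while (!recovered && !dfs.isFileClosed(file)) {
                Thread.sleep(4000L); // illustrative retry interval, not HBase's actual backoff
                recovered = dfs.recoverLease(file);
            }
        }
    }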
2024-11-24T02:53:01,182 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@3ad4ed7c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:01,183 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@756ea16d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:01,183 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:01,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@23038dc2{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:01,184 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@12827689{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:01,187 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:01,187 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T02:53:01,187 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-450919650-172.17.0.2-1732416762899 (Datanode Uuid ece4a389-2fb7-441a-a2b6-dfce68549f3f) service to localhost/127.0.0.1:46305 2024-11-24T02:53:01,187 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:01,188 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data7/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:01,188 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data8/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:01,188 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:01,336 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:03,135 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:03,139 WARN [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]] 2024-11-24T02:53:03,139 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:03,140 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34067%2C1732416765209:(num 1732416779127) roll requested 2024-11-24T02:53:03,140 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 2024-11-24T02:53:03,143 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 after 4006ms 2024-11-24T02:53:03,144 WARN [Thread-904 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741839_1021 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39763 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:03,144 WARN [Thread-904 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:03,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:32880 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741839_1021] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data5, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data6]'}, localName='127.0.0.1:34139', datanodeUuid='db426a4e-f47e-441e-82e9-4eb48c4c0a2d', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741839_1021 to mirror 127.0.0.1:39763 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:03,144 WARN [Thread-904 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741839_1021 2024-11-24T02:53:03,144 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:32880 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741839_1021] {}] datanode.BlockReceiver(316): Block 1073741839 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T02:53:03,144 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:32880 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:34139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32880 dst: /127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:03,146 WARN [Thread-904 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:03,152 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:03,152 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:03,152 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:03,152 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:03,152 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:03,152 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 2024-11-24T02:53:03,153 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36199:36199),(127.0.0.1/127.0.0.1:35737:35737)] 2024-11-24T02:53:03,154 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 2024-11-24T02:53:03,154 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 is not closed yet, will try archiving it next time 2024-11-24T02:53:03,155 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34139 is added to blk_1073741838_1020 (size=2431) 2024-11-24T02:53:03,193 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [10,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T02:53:03,336 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:03,557 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 2024-11-24T02:53:04,941 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@6aeb18b9[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:34139, datanodeUuid=db426a4e-f47e-441e-82e9-4eb48c4c0a2d, infoPort=35737, infoSecurePort=0, ipcPort=40327, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741838_1020 to 127.0.0.1:36945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,136 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,154 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:05,197 WARN [ResponseProcessor for block BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022 java.io.IOException: Bad response ERROR for BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022 from datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,197 WARN [DataStreamer for file /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 block BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:05,197 WARN [PacketResponder: BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:34139] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45852 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45852 dst: /127.0.0.1:33739 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] 
at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,199 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:32894 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1022] {}] datanode.DataXceiver(331): 127.0.0.1:34139:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:32894 dst: /127.0.0.1:34139 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,295 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@78e445ac{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:05,296 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5c0b8b07{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:05,296 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:05,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3ad9bbfc{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:05,297 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2af8c71d{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:05,300 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:05,300 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-450919650-172.17.0.2-1732416762899 (Datanode Uuid db426a4e-f47e-441e-82e9-4eb48c4c0a2d) service to localhost/127.0.0.1:46305 2024-11-24T02:53:05,300 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:53:05,300 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:05,301 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data5/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:05,302 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data6/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:05,302 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:05,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34067 {}] regionserver.HRegion(8855): Flush requested on 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:05,312 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:53:05,333 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/b51953f3a7d3401db00e32c5201d79e5 is 1080, key is row0002/info:/1732416781190/Put/seqid=0 2024-11-24T02:53:05,334 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741841_1024 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,334 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741841_1024 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 
2024-11-24T02:53:05,334 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741841_1024 2024-11-24T02:53:05,335 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:05,336 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741842_1025 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,336 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741842_1025 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:05,336 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741842_1025 2024-11-24T02:53:05,337 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:05,337 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,339 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741843_1026 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:39763 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45862 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741843_1026] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741843_1026 to mirror 127.0.0.1:39763 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,339 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741843_1026 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:05,339 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741843_1026 2024-11-24T02:53:05,339 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45862 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741843_1026] {}] datanode.BlockReceiver(316): Block 1073741843 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:05,339 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45862 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741843_1026] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45862 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,340 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:05,342 WARN [Thread-916 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741844_1027 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,342 WARN [Thread-916 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741844_1027 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:05,342 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45872 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741844_1027] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741844_1027 to mirror 127.0.0.1:36945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,342 WARN [Thread-916 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741844_1027 2024-11-24T02:53:05,342 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45872 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741844_1027] {}] datanode.BlockReceiver(316): Block 1073741844 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:05,342 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45872 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741844_1027] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45872 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:05,343 WARN [Thread-916 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:05,344 WARN [IPC Server handler 1 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:05,344 WARN [IPC Server handler 1 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:05,344 WARN [IPC Server handler 1 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:05,347 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741845_1028 (size=10347) 2024-11-24T02:53:05,749 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/b51953f3a7d3401db00e32c5201d79e5 2024-11-24T02:53:05,764 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/b51953f3a7d3401db00e32c5201d79e5 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/b51953f3a7d3401db00e32c5201d79e5 2024-11-24T02:53:05,772 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/b51953f3a7d3401db00e32c5201d79e5, entries=5, sequenceid=11, filesize=10.1 K 2024-11-24T02:53:05,774 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=9.45 KB/9681 for 7aa7cd2006b90d267df7d090f44cfe26 in 462ms, sequenceid=11, compaction requested=false 2024-11-24T02:53:05,774 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:05,952 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34067 {}] 
regionserver.HRegion(8855): Flush requested on 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:05,952 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=10.50 KB heapSize=11.50 KB 2024-11-24T02:53:05,962 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/66e6b70c66864659b2fb0bfb2b7f0f18 is 1080, key is row0007/info:/1732416785313/Put/seqid=0 2024-11-24T02:53:05,964 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741846_1029 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,964 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741846_1029 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:05,964 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741846_1029 2024-11-24T02:53:05,965 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:05,966 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741847_1030 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:05,967 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741847_1030 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:05,967 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741847_1030 2024-11-24T02:53:05,967 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:05,970 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741848_1031 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:05,970 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741848_1031 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:05,970 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741848_1031 2024-11-24T02:53:05,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45902 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741848_1031] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741848_1031 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,970 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45902 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741848_1031] {}] datanode.BlockReceiver(316): Block 1073741848 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:05,970 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45902 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741848_1031] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45902 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:05,971 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:05,972 WARN [Thread-923 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741849_1032 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:05,972 WARN [Thread-923 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741849_1032 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:05,972 WARN [Thread-923 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741849_1032 2024-11-24T02:53:05,973 WARN [Thread-923 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:05,973 WARN [IPC Server handler 1 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:05,973 WARN [IPC Server handler 1 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:05,974 WARN [IPC Server handler 1 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:05,976 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741850_1033 (size=12506) 2024-11-24T02:53:06,378 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.50 KB at sequenceid=24 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/66e6b70c66864659b2fb0bfb2b7f0f18 2024-11-24T02:53:06,384 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/66e6b70c66864659b2fb0bfb2b7f0f18 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18 2024-11-24T02:53:06,391 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18, entries=7, sequenceid=24, filesize=12.2 K 2024-11-24T02:53:06,392 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.50 KB/10756, heapSize ~11.48 KB/11760, currentSize=2.10 KB/2150 for 7aa7cd2006b90d267df7d090f44cfe26 in 440ms, sequenceid=24, compaction requested=false 2024-11-24T02:53:06,392 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:06,393 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=22.3 K, sizeToCheck=16.0 K 2024-11-24T02:53:06,393 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:06,393 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18 because midkey is the same as first or last row 2024-11-24T02:53:07,136 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,154 WARN [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]] 2024-11-24T02:53:07,154 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:07,154 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34067%2C1732416765209:(num 1732416783140) roll requested 2024-11-24T02:53:07,155 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.1732416787154 2024-11-24T02:53:07,157 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741851_1034 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,158 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741851_1034 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:07,158 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741851_1034 2024-11-24T02:53:07,158 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:07,160 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741852_1035 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:07,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45926 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741852_1035] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741852_1035 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:07,160 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741852_1035 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:07,160 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741852_1035 2024-11-24T02:53:07,160 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45926 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741852_1035] {}] datanode.BlockReceiver(316): Block 1073741852 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T02:53:07,160 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45926 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741852_1035] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45926 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:07,161 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:07,163 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741853_1036 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40699 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,163 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45940 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741853_1036] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741853_1036 to mirror 127.0.0.1:40699 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:07,163 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741853_1036 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:07,163 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741853_1036 2024-11-24T02:53:07,163 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45940 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741853_1036] {}] datanode.BlockReceiver(316): Block 1073741853 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T02:53:07,163 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45940 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741853_1036] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45940 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:07,164 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:07,165 WARN [Thread-928 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741854_1037 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:07,165 WARN [Thread-928 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741854_1037 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:07,165 WARN [Thread-928 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741854_1037 2024-11-24T02:53:07,166 WARN [Thread-928 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:07,167 WARN [IPC Server handler 4 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:07,167 WARN [IPC Server handler 4 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:07,167 WARN [IPC Server handler 4 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:07,170 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:07,170 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:07,170 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:07,170 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:07,170 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:07,170 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 with entries=24, filesize=24.23 KB; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416787154 2024-11-24T02:53:07,172 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36199:36199)] 2024-11-24T02:53:07,172 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 2024-11-24T02:53:07,172 
DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 is not closed yet, will try archiving it next time 2024-11-24T02:53:07,172 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs/7c69a60bd8f6%2C34067%2C1732416765209.1732416779127 2024-11-24T02:53:07,173 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741840_1023 (size=24824) 2024-11-24T02:53:07,337 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,378 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34067 {}] regionserver.HRegion(8855): Flush requested on 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:07,379 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T02:53:07,387 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/40194772dcd1426987038ff052ad7fc5 is 1079, key is tmprow/info:/1732416787376/Put/seqid=0 2024-11-24T02:53:07,390 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741856_1039 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:07,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45954 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741856_1039] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741856_1039 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:07,390 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741856_1039 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:07,390 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45954 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741856_1039] {}] datanode.BlockReceiver(316): Block 1073741856 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:07,390 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741856_1039 2024-11-24T02:53:07,390 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45954 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741856_1039] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45954 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:07,391 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:07,392 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741857_1040 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,392 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741857_1040 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:07,392 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741857_1040 2024-11-24T02:53:07,393 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:07,394 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741858_1041 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,394 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741858_1041 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:07,394 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741858_1041 2024-11-24T02:53:07,394 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:07,396 WARN [Thread-934 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741859_1042 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40699 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45964 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741859_1042] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741859_1042 to mirror 127.0.0.1:40699 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:07,396 WARN [Thread-934 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741859_1042 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:07,396 WARN [Thread-934 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741859_1042 2024-11-24T02:53:07,396 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45964 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741859_1042] {}] datanode.BlockReceiver(316): Block 1073741859 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:07,396 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:45964 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741859_1042] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45964 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:07,397 WARN [Thread-934 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:07,397 WARN [IPC Server handler 0 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:07,397 WARN [IPC Server handler 0 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:07,397 WARN [IPC Server handler 0 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:07,401 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741860_1043 (size=6027) 2024-11-24T02:53:07,575 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 2024-11-24T02:53:07,802 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=34 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/40194772dcd1426987038ff052ad7fc5 2024-11-24T02:53:07,809 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/40194772dcd1426987038ff052ad7fc5 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/40194772dcd1426987038ff052ad7fc5 2024-11-24T02:53:07,816 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/40194772dcd1426987038ff052ad7fc5, entries=1, sequenceid=34, filesize=5.9 K 2024-11-24T02:53:07,817 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7aa7cd2006b90d267df7d090f44cfe26 in 439ms, 
sequenceid=34, compaction requested=true 2024-11-24T02:53:07,817 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:07,818 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=28.2 K, sizeToCheck=16.0 K 2024-11-24T02:53:07,818 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:07,818 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18 because midkey is the same as first or last row 2024-11-24T02:53:07,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7aa7cd2006b90d267df7d090f44cfe26:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:53:07,818 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:53:07,818 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:53:07,819 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 28880 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:53:07,820 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HStore(1541): 7aa7cd2006b90d267df7d090f44cfe26/info is initiating minor compaction (all files) 2024-11-24T02:53:07,820 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7aa7cd2006b90d267df7d090f44cfe26/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 
2024-11-24T02:53:07,820 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/b51953f3a7d3401db00e32c5201d79e5, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/40194772dcd1426987038ff052ad7fc5] into tmpdir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp, totalSize=28.2 K 2024-11-24T02:53:07,820 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.Compactor(225): Compacting b51953f3a7d3401db00e32c5201d79e5, keycount=5, bloomtype=ROW, size=10.1 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732416781190 2024-11-24T02:53:07,821 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66e6b70c66864659b2fb0bfb2b7f0f18, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=24, earliestPutTs=1732416785313 2024-11-24T02:53:07,821 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.Compactor(225): Compacting 40194772dcd1426987038ff052ad7fc5, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732416787376 2024-11-24T02:53:07,836 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7aa7cd2006b90d267df7d090f44cfe26#info#compaction#21 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:53:07,836 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/d2d7cafea7584f0e88d1de2ed600268e is 1080, key is row0002/info:/1732416781190/Put/seqid=0 2024-11-24T02:53:07,838 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741861_1044 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:07,838 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741861_1044 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:07,838 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741861_1044 2024-11-24T02:53:07,839 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:07,840 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741862_1045 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,840 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741862_1045 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:07,840 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741862_1045 2024-11-24T02:53:07,841 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:07,842 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741863_1046 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,842 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741863_1046 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:07,842 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741863_1046 2024-11-24T02:53:07,842 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:07,843 WARN [Thread-942 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741864_1047 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:07,843 WARN [Thread-942 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741864_1047 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 
2024-11-24T02:53:07,844 WARN [Thread-942 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741864_1047 2024-11-24T02:53:07,844 WARN [Thread-942 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:07,844 WARN [IPC Server handler 3 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:07,845 WARN [IPC Server handler 3 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:07,845 WARN [IPC Server handler 3 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:07,848 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741865_1048 (size=17994) 2024-11-24T02:53:08,041 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68277e63[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741845_1028 to 127.0.0.1:39763 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:08,042 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@66e09d17[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741850_1033 to 127.0.0.1:36945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:08,262 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/d2d7cafea7584f0e88d1de2ed600268e as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e 2024-11-24T02:53:08,271 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7aa7cd2006b90d267df7d090f44cfe26/info of 7aa7cd2006b90d267df7d090f44cfe26 into d2d7cafea7584f0e88d1de2ed600268e(size=17.6 K), total size for store is 17.6 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T02:53:08,271 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:08,271 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26., storeName=7aa7cd2006b90d267df7d090f44cfe26/info, priority=13, startTime=1732416787818; duration=0sec 2024-11-24T02:53:08,271 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T02:53:08,271 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:08,271 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e because midkey is the same as first or last row 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e because midkey is the same as first or last row 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.6 K, sizeToCheck=16.0 K 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e because midkey is the same as first or last row 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:53:08,272 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7aa7cd2006b90d267df7d090f44cfe26:info 2024-11-24T02:53:08,805 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34067 {}] regionserver.HRegion(8855): Flush requested on 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:08,805 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T02:53:08,813 DEBUG 
[MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/5a13fbc3e1be49b98f02cb833e6bbc3d is 1079, key is tmprow/info:/1732416788803/Put/seqid=0 2024-11-24T02:53:08,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46020 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741866_1049] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741866_1049 to mirror 127.0.0.1:40699 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:08,817 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741866_1049 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40699 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:08,817 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741866_1049 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 
2024-11-24T02:53:08,817 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46020 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741866_1049] {}] datanode.BlockReceiver(316): Block 1073741866 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:08,817 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741866_1049 2024-11-24T02:53:08,817 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46020 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741866_1049] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46020 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:08,818 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:08,820 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741867_1050 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:08,820 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741867_1050 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 
2024-11-24T02:53:08,820 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741867_1050 2024-11-24T02:53:08,821 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:08,822 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741868_1051 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:08,822 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741868_1051 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:08,823 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741868_1051 2024-11-24T02:53:08,823 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:08,825 WARN [Thread-950 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741869_1052 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:08,825 WARN [Thread-950 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741869_1052 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:08,825 WARN [Thread-950 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741869_1052 2024-11-24T02:53:08,826 WARN [Thread-950 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:08,827 WARN [IPC Server handler 2 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:08,827 WARN [IPC Server handler 2 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:08,827 WARN [IPC Server handler 2 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:08,831 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741870_1053 (size=6027) 2024-11-24T02:53:09,040 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@66e09d17[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741840_1023 to 127.0.0.1:36945 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] 
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:09,040 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68277e63[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741860_1043 to 127.0.0.1:34139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:09,137 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:09,172 WARN [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(529): HDFS pipeline error detected. Found 1 replicas but expecting no less than 2 replicas. Requesting close of WAL. current pipeline: [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]] 2024-11-24T02:53:09,172 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:09,172 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34067%2C1732416765209:(num 1732416787154) roll requested 2024-11-24T02:53:09,173 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.1732416789172 2024-11-24T02:53:09,176 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741871_1054 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:09,176 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741871_1054 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:09,176 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741871_1054 2024-11-24T02:53:09,177 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:09,180 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741872_1055 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40699 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:09,179 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46032 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741872_1055] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741872_1055 to mirror 127.0.0.1:40699 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:09,180 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741872_1055 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:09,180 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741872_1055 2024-11-24T02:53:09,180 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46032 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741872_1055] {}] datanode.BlockReceiver(316): Block 1073741872 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T02:53:09,180 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46032 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741872_1055] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46032 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:09,181 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:09,182 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741873_1056 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:09,182 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741873_1056 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:09,183 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741873_1056 2024-11-24T02:53:09,183 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:09,186 WARN [Thread-955 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741874_1057 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:09,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46046 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741874_1057] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741874_1057 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:09,186 WARN [Thread-955 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741874_1057 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:09,186 WARN [Thread-955 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741874_1057 2024-11-24T02:53:09,186 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46046 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741874_1057] {}] datanode.BlockReceiver(316): Block 1073741874 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T02:53:09,186 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46046 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741874_1057] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46046 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:09,187 WARN [Thread-955 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:09,188 WARN [IPC Server handler 4 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:09,188 WARN [IPC Server handler 4 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:09,188 WARN [IPC Server handler 4 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:09,192 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:09,192 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:09,192 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:09,192 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:09,192 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:09,193 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416787154 with entries=14, filesize=12.92 KB; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416789172 2024-11-24T02:53:09,193 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36199:36199)] 2024-11-24T02:53:09,193 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 
2024-11-24T02:53:09,193 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416787154 is not closed yet, will try archiving it next time 2024-11-24T02:53:09,194 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs/7c69a60bd8f6%2C34067%2C1732416765209.1732416783140 2024-11-24T02:53:09,194 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741855_1038 (size=13234) 2024-11-24T02:53:09,233 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=45 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/5a13fbc3e1be49b98f02cb833e6bbc3d 2024-11-24T02:53:09,246 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/5a13fbc3e1be49b98f02cb833e6bbc3d as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/5a13fbc3e1be49b98f02cb833e6bbc3d 2024-11-24T02:53:09,251 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/5a13fbc3e1be49b98f02cb833e6bbc3d, entries=1, sequenceid=45, filesize=5.9 K 2024-11-24T02:53:09,252 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7aa7cd2006b90d267df7d090f44cfe26 in 447ms, sequenceid=45, compaction requested=false 2024-11-24T02:53:09,252 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:09,252 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=23.5 K, sizeToCheck=16.0 K 2024-11-24T02:53:09,252 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:09,253 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e because midkey is the same as first or last row 2024-11-24T02:53:09,337 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:09,596 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 is not closed yet, will try archiving it next time 2024-11-24T02:53:10,233 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34067 {}] regionserver.HRegion(8855): Flush requested on 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:10,233 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T02:53:10,239 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/c90a234193ca412a8c3e032e3e41560b is 1079, key is tmprow/info:/1732416790232/Put/seqid=0 2024-11-24T02:53:10,240 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741876_1059 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,241 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741876_1059 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 
2024-11-24T02:53:10,241 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741876_1059 2024-11-24T02:53:10,241 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:10,243 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741877_1060 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:40699 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,243 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46056 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741877_1060] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741877_1060 to mirror 127.0.0.1:40699 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:10,243 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741877_1060 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 
2024-11-24T02:53:10,244 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741877_1060 2024-11-24T02:53:10,244 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46056 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741877_1060] {}] datanode.BlockReceiver(316): Block 1073741877 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:10,244 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46056 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741877_1060] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46056 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:10,244 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:10,246 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741878_1061 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,246 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741878_1061 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 
2024-11-24T02:53:10,246 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741878_1061 2024-11-24T02:53:10,247 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:10,248 WARN [Thread-961 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741879_1062 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,249 WARN [Thread-961 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741879_1062 in pipeline [DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:10,249 WARN [Thread-961 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741879_1062 2024-11-24T02:53:10,249 WARN [Thread-961 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:10,250 WARN [IPC Server handler 2 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:10,250 WARN [IPC Server handler 2 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:10,250 WARN [IPC Server handler 2 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:10,253 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741880_1063 (size=6027) 2024-11-24T02:53:10,654 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=55 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/c90a234193ca412a8c3e032e3e41560b 2024-11-24T02:53:10,665 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/c90a234193ca412a8c3e032e3e41560b as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/c90a234193ca412a8c3e032e3e41560b 2024-11-24T02:53:10,673 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/c90a234193ca412a8c3e032e3e41560b, entries=1, sequenceid=55, filesize=5.9 K 2024-11-24T02:53:10,674 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7525, heapSize ~8.11 KB/8304, currentSize=2.10 KB/2150 for 7aa7cd2006b90d267df7d090f44cfe26 in 441ms, sequenceid=55, compaction requested=true 2024-11-24T02:53:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:10,674 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=29.3 K, sizeToCheck=16.0 K 2024-11-24T02:53:10,675 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:10,675 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e because midkey is the same as first or last row 2024-11-24T02:53:10,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 7aa7cd2006b90d267df7d090f44cfe26:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:53:10,675 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:53:10,675 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:53:10,677 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 30048 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:53:10,677 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HStore(1541): 7aa7cd2006b90d267df7d090f44cfe26/info is initiating minor compaction (all files) 
2024-11-24T02:53:10,677 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 7aa7cd2006b90d267df7d090f44cfe26/info in TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:53:10,677 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/5a13fbc3e1be49b98f02cb833e6bbc3d, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/c90a234193ca412a8c3e032e3e41560b] into tmpdir=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp, totalSize=29.3 K 2024-11-24T02:53:10,678 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.Compactor(225): Compacting d2d7cafea7584f0e88d1de2ed600268e, keycount=12, bloomtype=ROW, size=17.6 K, encoding=NONE, compression=NONE, seqNum=34, earliestPutTs=1732416781190 2024-11-24T02:53:10,678 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5a13fbc3e1be49b98f02cb833e6bbc3d, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=45, earliestPutTs=1732416788803 2024-11-24T02:53:10,679 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] compactions.Compactor(225): Compacting c90a234193ca412a8c3e032e3e41560b, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=55, earliestPutTs=1732416790232 2024-11-24T02:53:10,694 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 7aa7cd2006b90d267df7d090f44cfe26#info#compaction#24 average throughput is 6.16 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:53:10,694 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/904bb717ebc34710a0eef3e1fb25106d is 1080, key is row0002/info:/1732416781190/Put/seqid=0 2024-11-24T02:53:10,696 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741881_1064 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,696 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741881_1064 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:10,696 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741881_1064 2024-11-24T02:53:10,697 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:10,699 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741882_1065 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:36945 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,699 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46074 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741882_1065] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741882_1065 to mirror 127.0.0.1:36945 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:10,699 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741882_1065 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]) is bad. 2024-11-24T02:53:10,699 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741882_1065 2024-11-24T02:53:10,699 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46074 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741882_1065] {}] datanode.BlockReceiver(316): Block 1073741882 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:10,699 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:46074 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741882_1065] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46074 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:10,699 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:36945,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK] 2024-11-24T02:53:10,701 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741883_1066 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,701 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741883_1066 in pipeline [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK], DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]) is bad. 2024-11-24T02:53:10,701 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741883_1066 2024-11-24T02:53:10,701 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK] 2024-11-24T02:53:10,703 WARN [Thread-966 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741884_1067 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:10,703 WARN [Thread-966 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741884_1067 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 
2024-11-24T02:53:10,703 WARN [Thread-966 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741884_1067 2024-11-24T02:53:10,703 WARN [Thread-966 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:10,704 WARN [IPC Server handler 2 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) For more information, please enable DEBUG log level on org.apache.hadoop.hdfs.server.blockmanagement.BlockPlacementPolicy and org.apache.hadoop.net.NetworkTopology 2024-11-24T02:53:10,704 WARN [IPC Server handler 2 on default port 46305 {}] protocol.BlockStoragePolicy(161): Failed to place enough replicas: expected size is 1 but only 0 storage types can be selected (replication=2, selected=[], unavailable=[DISK], removed=[DISK], policy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}) 2024-11-24T02:53:10,704 WARN [IPC Server handler 2 on default port 46305 {}] blockmanagement.BlockPlacementPolicyDefault(501): Failed to place enough replicas, still in need of 1 to reach 2 (unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]}, newBlock=true) All required storage types are unavailable: unavailableStorages=[DISK], storagePolicy=BlockStoragePolicy{HOT:7, storageTypes=[DISK], creationFallbacks=[], replicationFallbacks=[ARCHIVE]} 2024-11-24T02:53:10,707 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741885_1068 (size=18097) 2024-11-24T02:53:11,041 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@66e09d17[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741870_1053 to 127.0.0.1:40699 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:11,041 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68277e63[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741865_1048 to 127.0.0.1:39763 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:11,118 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/904bb717ebc34710a0eef3e1fb25106d as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/904bb717ebc34710a0eef3e1fb25106d 2024-11-24T02:53:11,127 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 7aa7cd2006b90d267df7d090f44cfe26/info of 7aa7cd2006b90d267df7d090f44cfe26 into 904bb717ebc34710a0eef3e1fb25106d(size=17.7 K), total size for store is 17.7 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:11,127 INFO [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26., storeName=7aa7cd2006b90d267df7d090f44cfe26/info, priority=13, startTime=1732416790675; duration=0sec 2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/904bb717ebc34710a0eef3e1fb25106d because midkey is the same as first or last row 2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:11,127 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/904bb717ebc34710a0eef3e1fb25106d because midkey is the same as first or last row 2024-11-24T02:53:11,128 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=17.7 K, sizeToCheck=16.0 K 2024-11-24T02:53:11,128 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:11,128 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/904bb717ebc34710a0eef3e1fb25106d because midkey is the same as first or last row 2024-11-24T02:53:11,128 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:53:11,128 DEBUG [RS:0;7c69a60bd8f6:34067-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 7aa7cd2006b90d267df7d090f44cfe26:info 2024-11-24T02:53:11,138 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:11,194 WARN [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(539): Too many consecutive RollWriter requests, it's a sign of the total number of live datanodes is lower than the tolerable replicas. 2024-11-24T02:53:11,194 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:11,265 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:11,270 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:11,270 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:11,271 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:11,271 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:53:11,271 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61815e22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:11,272 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7177a9b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:11,338 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:11,365 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@39eaf0e6{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/java.io.tmpdir/jetty-localhost-33131-hadoop-hdfs-3_4_1-tests_jar-_-any-3807425931044545629/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:11,366 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1542e930{HTTP/1.1, (http/1.1)}{localhost:33131} 2024-11-24T02:53:11,366 INFO [Time-limited test {}] server.Server(415): Started @135107ms 2024-11-24T02:53:11,367 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:11,791 WARN [Thread-985 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:53:11,798 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd57d396abe839033 with lease ID 0x2543051345156248: from storage DS-15870cd1-c887-4843-8d12-c21d96ed70ee node DatanodeRegistration(127.0.0.1:39099, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=38645, infoSecurePort=0, ipcPort=40763, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 7, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:53:11,798 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xd57d396abe839033 with lease ID 0x2543051345156248: from storage DS-1e275f96-c8ef-4113-ad22-9483b2a54541 node DatanodeRegistration(127.0.0.1:39099, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=38645, infoSecurePort=0, ipcPort=40763, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:12,041 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@68277e63[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741855_1038 to 127.0.0.1:34139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:12,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741880_1063 (size=6027) 2024-11-24T02:53:13,138 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:13,194 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:13,338 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:14,042 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@66e09d17[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:33739, datanodeUuid=a76e24ca-ef22-4122-a046-e4eef85f49b8, infoPort=36199, infoSecurePort=0, ipcPort=45021, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741885_1068 to 127.0.0.1:39763 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] 
at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:15,030 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T02:53:15,138 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:15,195 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:15,339 INFO [master:store-WAL-Roller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:16,124 ERROR [FSHLog-0-hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData-prefix:7c69a60bd8f6,35999,1732416765056 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:16,125 WARN [FSHLog-0-hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData-prefix:7c69a60bd8f6,35999,1732416765056 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:16,125 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C35999%2C1732416765056:(num 1732416765768) roll requested 2024-11-24T02:53:16,126 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C35999%2C1732416765056.1732416796126 2024-11-24T02:53:16,131 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741886_1069 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:16,131 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741886_1069 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:16,131 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741886_1069 2024-11-24T02:53:16,132 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:16,133 WARN [Thread-1006 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741887_1070 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:16,133 WARN [Thread-1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741887_1070 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK]) is bad. 2024-11-24T02:53:16,133 WARN [Thread-1006 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741887_1070 2024-11-24T02:53:16,134 WARN [Thread-1006 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:39763,DS-046b3858-d28d-4940-862c-dde121ce5de8,DISK] 2024-11-24T02:53:16,139 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:16,139 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:16,139 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:16,139 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:16,139 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:16,140 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 with entries=54, filesize=26.67 KB; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416796126 2024-11-24T02:53:16,140 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:16,140 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:16,141 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 2024-11-24T02:53:16,141 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36199:36199),(127.0.0.1/127.0.0.1:38645:38645)] 2024-11-24T02:53:16,141 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 is not closed yet, will try archiving it next time 2024-11-24T02:53:16,141 WARN [IPC Server handler 2 on default port 46305 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 has not been closed. Lease recovery is in progress. RecoveryId = 1072 for block blk_1073741830_1006 2024-11-24T02:53:16,141 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 after 0ms 2024-11-24T02:53:17,139 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:17,195 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:19,140 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:19,196 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:20,143 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 after 4002ms 2024-11-24T02:53:21,140 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... 
at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:21,196 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:21,814 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@313d1b7 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1009, datanode=DatanodeInfoWithStorage[127.0.0.1:40699,null,null]) java.net.ConnectException: Call From 7c69a60bd8f6/172.17.0.2 to localhost:34869 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T02:53:21,818 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741833_1019 (size=455) 2024-11-24T02:53:22,166 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs/7c69a60bd8f6%2C34067%2C1732416765209.1732416766433 2024-11-24T02:53:22,168 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416787154 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs/7c69a60bd8f6%2C34067%2C1732416765209.1732416787154 2024-11-24T02:53:22,797 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@325ab664[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39099, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=38645, infoSecurePort=0, ipcPort=40763, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741833_1019 to 127.0.0.1:34139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:23,141 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:23,197 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
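For context on the repeated "java.io.IOException: All datanodes [...] are bad. Aborting..." traces above: the HDFS client's DataStreamer raises this once every datanode remaining in a write pipeline has failed, which is what the test provokes by killing the datanode at 127.0.0.1:40699 underneath the live WAL. The following is only a minimal, hypothetical sketch of how an ordinary HDFS write surfaces the same error; the NameNode URI is taken from this log, while the path and the replication factor of 1 are illustrative assumptions, not the test's configuration.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

// Sketch only: reproduce "All datanodes ... are bad" by writing through a
// single-datanode pipeline and losing that datanode mid-write.
public class WalPipelineFailureSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46305"); // NameNode URI seen in this log
    conf.setInt("dfs.replication", 1);                  // one-node pipeline: no replacement possible
    FileSystem fs = FileSystem.get(conf);
    try (FSDataOutputStream out = fs.create(new Path("/tmp/wal-failure-sketch"))) { // illustrative path
      out.writeBytes("edit-1\n");
      out.hflush(); // durable on the pipeline datanode, analogous to a WAL sync
      // If the only datanode in the pipeline dies here, DataStreamer cannot rebuild the
      // pipeline, and the next hflush()/close() fails with the IOException logged above.
      out.writeBytes("edit-2\n");
      out.hflush();
    }
  }
}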
2024-11-24T02:53:24,945 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.1732416804945 2024-11-24T02:53:24,951 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:24,951 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:24,951 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:24,951 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:24,951 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:24,951 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416789172 with entries=14, filesize=12.95 KB; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416804945 2024-11-24T02:53:24,952 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36199:36199),(127.0.0.1/127.0.0.1:38645:38645)] 2024-11-24T02:53:24,952 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416789172 is not closed yet, will try archiving it next time 2024-11-24T02:53:25,015 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741875_1058 (size=13268) 2024-11-24T02:53:25,019 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34067 {}] regionserver.HRegion(8855): Flush requested on 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:25,019 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=7.35 KB heapSize=8.13 KB 2024-11-24T02:53:25,024 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/adc965e2e97b46e3addc09ef1470b674 is 1080, key is row0013/info:/1732416804953/Put/seqid=0 2024-11-24T02:53:25,027 WARN [Thread-1022 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741890_1074 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
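The MemStoreFlusher entries just above (flush requested on 7aa7cd2006b90d267df7d090f44cfe26, 1/1 column families, biggest cell key row0013/info:) correspond to ordinary client puts followed by an explicit flush. Purely as an illustration, and not the test's actual code, a client workload of that shape could look like the sketch below; the table and column-family names are copied from the log, while the row keys and values are invented.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

// Sketch only: a few puts to the test table's "info" family, then a flush that
// turns the memstore into an HFile like the "Added hdfs://.../info/..." lines above.
public class FlushWorkloadSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    TableName tn = TableName.valueOf("TestLogRolling-testLogRollOnDatanodeDeath");
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table table = conn.getTable(tn);
         Admin admin = conn.getAdmin()) {
      for (int i = 13; i <= 16; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i))); // row0013..row0016
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value-" + i));
        table.put(put);
      }
      admin.flush(tn); // triggers the "Flushing ... 1/1 column families" activity
    }
  }
}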
2024-11-24T02:53:25,027 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36854 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741890_1074] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data4]'}, localName='127.0.0.1:39099', datanodeUuid='1edc601b-a53b-40fd-945a-3baf2679ddc6', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741890_1074 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:25,027 WARN [Thread-1022 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741890_1074 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39099,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:25,027 WARN [Thread-1022 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741890_1074 2024-11-24T02:53:25,027 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36854 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741890_1074] {}] datanode.BlockReceiver(316): Block 1073741890 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:25,028 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36854 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741890_1074] {}] datanode.DataXceiver(331): 127.0.0.1:39099:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36854 dst: /127.0.0.1:39099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:25,028 WARN [Thread-1022 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:25,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741891_1075 (size=9267) 2024-11-24T02:53:25,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741891_1075 (size=9267) 2024-11-24T02:53:25,033 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.35 KB at sequenceid=66 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/adc965e2e97b46e3addc09ef1470b674 2024-11-24T02:53:25,041 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/adc965e2e97b46e3addc09ef1470b674 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/adc965e2e97b46e3addc09ef1470b674 2024-11-24T02:53:25,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/adc965e2e97b46e3addc09ef1470b674, entries=4, sequenceid=66, filesize=9.0 K 2024-11-24T02:53:25,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.35 KB/7528, heapSize ~8.11 KB/8304, currentSize=8.41 KB/8607 for 7aa7cd2006b90d267df7d090f44cfe26 in 30ms, sequenceid=66, compaction requested=false 2024-11-24T02:53:25,049 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 7aa7cd2006b90d267df7d090f44cfe26: 2024-11-24T02:53:25,049 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=26.7 K, sizeToCheck=16.0 K 2024-11-24T02:53:25,049 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:53:25,050 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/904bb717ebc34710a0eef3e1fb25106d because midkey is the same as first or last row 2024-11-24T02:53:25,141 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): 
java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,198 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(556): LowReplication-Roller was enabled. 2024-11-24T02:53:25,198 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.FSHLog(580): java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,239 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:53:25,239 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T02:53:25,239 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:53:25,239 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:25,240 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:25,240 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
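The call stack above comes from AbstractTestLogRolling.tearDown() invoking HBaseTestingUtil.shutdownMiniCluster(). As a hedged sketch only, a test class using that utility typically pairs the shutdown with a matching startMiniCluster() in setup, roughly as follows; the method names are assumed from the stack trace, and the two-region-server argument merely mirrors RS:0 and RS:1 in this run.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

// Sketch only: JUnit 4 lifecycle around the mini cluster whose shutdown is logged above.
public class LogRollingTeardownSketch {
  private final HBaseTestingUtil util = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    util.startMiniCluster(2); // two region servers, as in this run
  }

  @Test
  public void testSomething() throws Exception {
    // test body elided
  }

  @After
  public void tearDown() throws Exception {
    // Mirrors AbstractTestLogRolling.tearDown(): close connections, stop HBase,
    // then the mini DFS/ZooKeeper clusters.
    util.shutdownMiniCluster();
  }
}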
2024-11-24T02:53:25,240 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:53:25,240 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1274879632, stopped=false 2024-11-24T02:53:25,240 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,35999,1732416765056 2024-11-24T02:53:25,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:53:25,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:53:25,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:53:25,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:25,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:25,259 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:25,259 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:53:25,259 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T02:53:25,259 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:53:25,260 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:25,260 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,34067,1732416765209' ***** 2024-11-24T02:53:25,260 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:53:25,260 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:53:25,260 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:53:25,260 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,44053,1732416766982' ***** 2024-11-24T02:53:25,260 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:53:25,260 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:53:25,260 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:53:25,260 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:53:25,260 INFO [RS:0;7c69a60bd8f6:34067 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:53:25,260 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:53:25,260 INFO [RS:0;7c69a60bd8f6:34067 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:53:25,260 INFO [RS:1;7c69a60bd8f6:44053 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:53:25,260 INFO [RS:1;7c69a60bd8f6:44053 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:53:25,260 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(3091): Received CLOSE for 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:25,260 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,44053,1732416766982 2024-11-24T02:53:25,260 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:53:25,261 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:53:25,261 INFO [RS:1;7c69a60bd8f6:44053 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:1;7c69a60bd8f6:44053. 
2024-11-24T02:53:25,261 DEBUG [RS:1;7c69a60bd8f6:44053 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:53:25,261 DEBUG [RS:1;7c69a60bd8f6:44053 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,34067,1732416765209 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:53:25,261 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,44053,1732416766982; all regions closed. 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:34067. 
2024-11-24T02:53:25,261 DEBUG [RS:0;7c69a60bd8f6:34067 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:53:25,261 DEBUG [RS:0;7c69a60bd8f6:34067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:25,261 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 7aa7cd2006b90d267df7d090f44cfe26, disabling compactions & flushes 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:53:25,261 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:53:25,261 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:53:25,261 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:53:25,261 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,261 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:53:25,261 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. after waiting 0 ms 2024-11-24T02:53:25,261 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 
2024-11-24T02:53:25,261 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,261 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,262 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,262 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T02:53:25,262 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 7aa7cd2006b90d267df7d090f44cfe26 1/1 column families, dataSize=8.41 KB heapSize=9.25 KB 2024-11-24T02:53:25,262 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1325): Online Regions={7aa7cd2006b90d267df7d090f44cfe26=TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T02:53:25,262 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:53:25,262 DEBUG [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 7aa7cd2006b90d267df7d090f44cfe26 2024-11-24T02:53:25,262 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:53:25,262 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:53:25,262 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:53:25,262 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:53:25,262 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,262 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.71 KB heapSize=3.75 KB 2024-11-24T02:53:25,262 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,262 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 2024-11-24T02:53:25,262 ERROR [FSHLog-0-hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad-prefix:7c69a60bd8f6,34067,1732416765209.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,262 WARN [FSHLog-0-hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad-prefix:7c69a60bd8f6,34067,1732416765209.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,263 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34067%2C1732416765209.meta:.meta(num 1732416766765) roll requested 2024-11-24T02:53:25,263 WARN [IPC Server handler 4 on default port 46305 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 has not been closed. Lease recovery is in progress. 
RecoveryId = 1076 for block blk_1073741837_1013 2024-11-24T02:53:25,263 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 after 1ms 2024-11-24T02:53:25,263 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416805263.meta 2024-11-24T02:53:25,267 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36892 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741892_1077] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data3, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data4]'}, localName='127.0.0.1:39099', datanodeUuid='1edc601b-a53b-40fd-945a-3baf2679ddc6', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741892_1077 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:25,267 WARN [Thread-1031 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741892_1077 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 
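The RecoverLeaseFSUtils / "Lease recovery is in progress. RecoveryId = 1076" exchange above is the standard HDFS lease-recovery handshake on a WAL whose writer has gone away. Below is a minimal sketch using the public DistributedFileSystem.recoverLease API rather than HBase's internal helper; the file path is the WAL named in the log, and the polling interval is an arbitrary assumption.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

// Sketch only: poll recoverLease() until the NameNode reports the file closed.
public class RecoverWalLeaseSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    conf.set("fs.defaultFS", "hdfs://localhost:46305");
    Path wal = new Path("/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/"
        + "7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280");
    DistributedFileSystem dfs = (DistributedFileSystem) wal.getFileSystem(conf);
    // recoverLease() returns false while block recovery (the RecoveryId above) is still
    // running; HBase's RecoverLeaseFSUtils wraps the same call in a bounded retry loop.
    while (!dfs.recoverLease(wal)) {
      Thread.sleep(1000L); // arbitrary back-off for the sketch
    }
  }
}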
2024-11-24T02:53:25,267 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36892 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741892_1077] {}] datanode.BlockReceiver(316): Block 1073741892 has not released the reserved bytes. Releasing 268435456 bytes as part of close. 2024-11-24T02:53:25,267 WARN [Thread-1031 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741892_1077 in pipeline [DatanodeInfoWithStorage[127.0.0.1:39099,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:25,267 WARN [Thread-1031 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741892_1077 2024-11-24T02:53:25,267 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36892 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741892_1077] {}] datanode.DataXceiver(331): 127.0.0.1:39099:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36892 dst: /127.0.0.1:39099 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:25,267 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/548e3b2fa8f843c0a0587b55c825cea3 is 1080, key is row0016/info:/1732416805020/Put/seqid=0 2024-11-24T02:53:25,268 WARN [Thread-1031 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:25,269 WARN [Thread-1032 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741893_1078 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,269 WARN [Thread-1032 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741893_1078 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:25,269 WARN [Thread-1032 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741893_1078 2024-11-24T02:53:25,269 WARN [Thread-1032 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:25,272 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,272 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,272 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,272 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,272 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,272 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta with entries=8, filesize=2.33 KB; new WAL /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416805263.meta 2024-11-24T02:53:25,276 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,276 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:40699,DS-a5ec0835-6b6e-426b-8abf-84a1e2db5e6c,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,276 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta 2024-11-24T02:53:25,277 WARN [IPC Server handler 3 on default port 46305 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta has not been closed. Lease recovery is in progress. RecoveryId = 1081 for block blk_1073741834_1010 2024-11-24T02:53:25,277 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta after 1ms 2024-11-24T02:53:25,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741895_1080 (size=13583) 2024-11-24T02:53:25,278 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741895_1080 (size=13583) 2024-11-24T02:53:25,279 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=8.41 KB at sequenceid=77 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/548e3b2fa8f843c0a0587b55c825cea3 2024-11-24T02:53:25,281 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36199:36199),(127.0.0.1/127.0.0.1:38645:38645)] 2024-11-24T02:53:25,281 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta is not closed yet, will try archiving it next time 2024-11-24T02:53:25,287 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/.tmp/info/548e3b2fa8f843c0a0587b55c825cea3 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/548e3b2fa8f843c0a0587b55c825cea3 2024-11-24T02:53:25,293 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/548e3b2fa8f843c0a0587b55c825cea3, entries=8, sequenceid=77, 
filesize=13.3 K 2024-11-24T02:53:25,294 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 7aa7cd2006b90d267df7d090f44cfe26 in 33ms, sequenceid=77, compaction requested=true 2024-11-24T02:53:25,295 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/b51953f3a7d3401db00e32c5201d79e5, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/40194772dcd1426987038ff052ad7fc5, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/5a13fbc3e1be49b98f02cb833e6bbc3d, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/c90a234193ca412a8c3e032e3e41560b] to archive 2024-11-24T02:53:25,296 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T02:53:25,297 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/info/1f8603e40b2c45dfae423c65b261253c is 203, key is TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26./info:regioninfo/1732416767565/Put/seqid=0 2024-11-24T02:53:25,298 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/b51953f3a7d3401db00e32c5201d79e5 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/b51953f3a7d3401db00e32c5201d79e5 2024-11-24T02:53:25,300 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/66e6b70c66864659b2fb0bfb2b7f0f18 2024-11-24T02:53:25,300 WARN [Thread-1044 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741896_1082 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,300 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36128 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741896_1082] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741896_1082 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] 
at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:25,300 WARN [Thread-1044 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741896_1082 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:25,300 WARN [Thread-1044 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741896_1082 2024-11-24T02:53:25,300 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36128 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741896_1082] {}] datanode.BlockReceiver(316): Block 1073741896 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:25,301 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36128 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741896_1082] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36128 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:25,301 WARN [Thread-1044 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:25,302 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/d2d7cafea7584f0e88d1de2ed600268e 2024-11-24T02:53:25,303 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/40194772dcd1426987038ff052ad7fc5 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/40194772dcd1426987038ff052ad7fc5 2024-11-24T02:53:25,305 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/5a13fbc3e1be49b98f02cb833e6bbc3d to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/5a13fbc3e1be49b98f02cb833e6bbc3d 2024-11-24T02:53:25,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741897_1083 (size=7089) 2024-11-24T02:53:25,306 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741897_1083 (size=7089) 2024-11-24T02:53:25,306 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.50 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/info/1f8603e40b2c45dfae423c65b261253c 2024-11-24T02:53:25,307 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/c90a234193ca412a8c3e032e3e41560b to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/archive/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/info/c90a234193ca412a8c3e032e3e41560b 2024-11-24T02:53:25,307 DEBUG [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] 
regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7c69a60bd8f6:35999 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T02:53:25,308 WARN [StoreCloser-TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [b51953f3a7d3401db00e32c5201d79e5=10347, 66e6b70c66864659b2fb0bfb2b7f0f18=12506, d2d7cafea7584f0e88d1de2ed600268e=17994, 40194772dcd1426987038ff052ad7fc5=6027, 5a13fbc3e1be49b98f02cb833e6bbc3d=6027, c90a234193ca412a8c3e032e3e41560b=6027] 2024-11-24T02:53:25,312 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/default/TestLogRolling-testLogRollOnDatanodeDeath/7aa7cd2006b90d267df7d090f44cfe26/recovered.edits/80.seqid, newMaxSeqId=80, maxSeqId=1 2024-11-24T02:53:25,313 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 2024-11-24T02:53:25,313 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 7aa7cd2006b90d267df7d090f44cfe26: Waiting for close lock at 1732416805261Running coprocessor pre-close hooks at 1732416805261Disabling compacts and flushes for region at 1732416805261Disabling writes for close at 1732416805261Obtaining lock to block concurrent updates at 1732416805262 (+1 ms)Preparing flush snapshotting stores in 7aa7cd2006b90d267df7d090f44cfe26 at 1732416805262Finished memstore snapshotting TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26., syncing WAL and waiting on mvcc, flushsize=dataSize=8607, getHeapSize=9456, getOffHeapSize=0, getCellsCount=8 at 1732416805262Flushing stores of TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. at 1732416805262Flushing 7aa7cd2006b90d267df7d090f44cfe26/info: creating writer at 1732416805263 (+1 ms)Flushing 7aa7cd2006b90d267df7d090f44cfe26/info: appending metadata at 1732416805267 (+4 ms)Flushing 7aa7cd2006b90d267df7d090f44cfe26/info: closing flushed file at 1732416805267Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4d551a5e: reopening flushed file at 1732416805286 (+19 ms)Finished flush of dataSize ~8.41 KB/8607, heapSize ~9.23 KB/9456, currentSize=0 B/0 for 7aa7cd2006b90d267df7d090f44cfe26 in 33ms, sequenceid=77, compaction requested=true at 1732416805294 (+8 ms)Writing region close event to WAL at 1732416805308 (+14 ms)Running coprocessor post-close hooks at 1732416805313 (+5 ms)Closed at 1732416805313 2024-11-24T02:53:25,313 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnDatanodeDeath,,1732416767180.7aa7cd2006b90d267df7d090f44cfe26. 
2024-11-24T02:53:25,331 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/ns/cb7ab19a380d41aca5b856935347d180 is 43, key is default/ns:d/1732416766847/Put/seqid=0 2024-11-24T02:53:25,333 WARN [Thread-1052 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741898_1084 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createSocketForPipeline(DataStreamer.java:256) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1894) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,333 WARN [Thread-1052 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741898_1084 in pipeline [DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK], DatanodeInfoWithStorage[127.0.0.1:39099,DS-15870cd1-c887-4843-8d12-c21d96ed70ee,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 
2024-11-24T02:53:25,333 WARN [Thread-1052 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741898_1084 2024-11-24T02:53:25,334 WARN [Thread-1052 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:25,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741899_1085 (size=5153) 2024-11-24T02:53:25,339 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741899_1085 (size=5153) 2024-11-24T02:53:25,339 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/ns/cb7ab19a380d41aca5b856935347d180 2024-11-24T02:53:25,360 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/table/759963cd6c4141a29ff974ec4f1b6f8c is 77, key is TestLogRolling-testLogRollOnDatanodeDeath/table:state/1732416767576/Put/seqid=0 2024-11-24T02:53:25,363 WARN [Thread-1058 {}] hdfs.DataStreamer(1959): Exception in createBlockOutputStream blk_1073741900_1086 java.io.IOException: Got error, status=ERROR, status message , ack with firstBadLink as 127.0.0.1:34139 at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:128) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.DataTransferProtoUtil.checkBlockOpStatus(DataTransferProtoUtil.java:104) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.createBlockOutputStream(DataStreamer.java:1947) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForCreate(DataStreamer.java:1842) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:752) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:25,363 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36172 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741900_1086] {}] datanode.DataXceiver(892): DataNode{data=FSDataset{dirpath='[/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9, /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10]'}, localName='127.0.0.1:33739', datanodeUuid='a76e24ca-ef22-4122-a046-e4eef85f49b8', xmitsInProgress=0}:Exception transferring block BP-450919650-172.17.0.2-1732416762899:blk_1073741900_1086 to mirror 127.0.0.1:34139 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:25,363 WARN [Thread-1058 {}] hdfs.DataStreamer(1731): Error Recovery for BP-450919650-172.17.0.2-1732416762899:blk_1073741900_1086 in pipeline [DatanodeInfoWithStorage[127.0.0.1:33739,DS-ab05df6c-0fbb-40ba-b789-460231ac12e0,DISK], DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK]) is bad. 2024-11-24T02:53:25,363 WARN [Thread-1058 {}] hdfs.DataStreamer(1850): Abandoning BP-450919650-172.17.0.2-1732416762899:blk_1073741900_1086 2024-11-24T02:53:25,363 WARN [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36172 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741900_1086] {}] datanode.BlockReceiver(316): Block 1073741900 has not released the reserved bytes. Releasing 134217728 bytes as part of close. 2024-11-24T02:53:25,363 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_-294536565_22 at /127.0.0.1:36172 [Receiving block BP-450919650-172.17.0.2-1732416762899:blk_1073741900_1086] {}] datanode.DataXceiver(331): 127.0.0.1:33739:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:36172 dst: /127.0.0.1:33739 java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:807) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:25,364 WARN [Thread-1058 {}] hdfs.DataStreamer(1857): Excluding datanode DatanodeInfoWithStorage[127.0.0.1:34139,DS-15975a09-2384-4fe6-aac0-e8ee50f26af6,DISK] 2024-11-24T02:53:25,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741901_1087 (size=5424) 2024-11-24T02:53:25,369 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741901_1087 (size=5424) 2024-11-24T02:53:25,369 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=146 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/table/759963cd6c4141a29ff974ec4f1b6f8c 2024-11-24T02:53:25,376 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/info/1f8603e40b2c45dfae423c65b261253c as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/info/1f8603e40b2c45dfae423c65b261253c 2024-11-24T02:53:25,382 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/info/1f8603e40b2c45dfae423c65b261253c, entries=10, sequenceid=11, filesize=6.9 K 2024-11-24T02:53:25,383 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/ns/cb7ab19a380d41aca5b856935347d180 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/ns/cb7ab19a380d41aca5b856935347d180 2024-11-24T02:53:25,385 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T02:53:25,386 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T02:53:25,389 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/ns/cb7ab19a380d41aca5b856935347d180, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T02:53:25,390 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/.tmp/table/759963cd6c4141a29ff974ec4f1b6f8c as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/table/759963cd6c4141a29ff974ec4f1b6f8c 2024-11-24T02:53:25,396 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/table/759963cd6c4141a29ff974ec4f1b6f8c, entries=2, sequenceid=11, 
filesize=5.3 K 2024-11-24T02:53:25,397 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false 2024-11-24T02:53:25,402 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T02:53:25,402 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:53:25,402 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:53:25,402 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416805262Running coprocessor pre-close hooks at 1732416805262Disabling compacts and flushes for region at 1732416805262Disabling writes for close at 1732416805262Obtaining lock to block concurrent updates at 1732416805262Preparing flush snapshotting stores in 1588230740 at 1732416805262Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1752, getHeapSize=3776, getOffHeapSize=0, getCellsCount=14 at 1732416805262Flushing stores of hbase:meta,,1.1588230740 at 1732416805281 (+19 ms)Flushing 1588230740/info: creating writer at 1732416805281Flushing 1588230740/info: appending metadata at 1732416805297 (+16 ms)Flushing 1588230740/info: closing flushed file at 1732416805297Flushing 1588230740/ns: creating writer at 1732416805313 (+16 ms)Flushing 1588230740/ns: appending metadata at 1732416805330 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732416805330Flushing 1588230740/table: creating writer at 1732416805346 (+16 ms)Flushing 1588230740/table: appending metadata at 1732416805360 (+14 ms)Flushing 1588230740/table: closing flushed file at 1732416805360Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@39c73d5e: reopening flushed file at 1732416805375 (+15 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@68ab3717: reopening flushed file at 1732416805382 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@67cc8a9f: reopening flushed file at 1732416805389 (+7 ms)Finished flush of dataSize ~1.71 KB/1752, heapSize ~3.45 KB/3536, currentSize=0 B/0 for 1588230740 in 135ms, sequenceid=11, compaction requested=false at 1732416805397 (+8 ms)Writing region close event to WAL at 1732416805398 (+1 ms)Running coprocessor post-close hooks at 1732416805402 (+4 ms)Closed at 1732416805402 2024-11-24T02:53:25,402 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:53:25,416 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.1732416789172 to 
hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs/7c69a60bd8f6%2C34067%2C1732416765209.1732416789172 2024-11-24T02:53:25,462 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,34067,1732416765209; all regions closed. 2024-11-24T02:53:25,462 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,463 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,463 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,463 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,463 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:25,465 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741894_1079 (size=825) 2024-11-24T02:53:25,466 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741894_1079 (size=825) 2024-11-24T02:53:26,135 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T02:53:26,136 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T02:53:26,299 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:53:26,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741835_1011 (size=393) 2024-11-24T02:53:26,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:53:27,056 INFO [master/7c69a60bd8f6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T02:53:27,056 INFO [master/7c69a60bd8f6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 2024-11-24T02:53:27,136 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:53:27,795 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@325ab664[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39099, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=38645, infoSecurePort=0, ipcPort=40763, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741827_1003 to 127.0.0.1:34139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:27,795 WARN [java.util.concurrent.ThreadPoolExecutor$Worker@633c2ef8[State = -1, empty queue] {}] datanode.DataNode$DataTransfer(3129): DatanodeRegistration(127.0.0.1:39099, datanodeUuid=1edc601b-a53b-40fd-945a-3baf2679ddc6, infoPort=38645, infoSecurePort=0, ipcPort=40763, storageInfo=lv=-57;cid=testClusterID;nsid=1913117730;c=1732416762899):Failed to transfer BP-450919650-172.17.0.2-1732416762899:blk_1073741829_1005 to 127.0.0.1:34139 got java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:577) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataNode$DataTransfer.run(DataNode.java:3063) ~[hadoop-hdfs-3.4.1.jar:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:28,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:53:28,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:53:29,044 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741875_1058 (size=13268) 2024-11-24T02:53:29,264 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 after 4002ms 2024-11-24T02:53:29,279 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=1 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta after 4003ms 2024-11-24T02:53:29,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:53:30,262 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T02:53:30,264 DEBUG [RS:1;7c69a60bd8f6:44053 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs 2024-11-24T02:53:30,264 INFO [RS:1;7c69a60bd8f6:44053 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C44053%2C1732416766982:(num 1732416767280) 2024-11-24T02:53:30,264 DEBUG [RS:1;7c69a60bd8f6:44053 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:30,264 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.LeaseManager(133): Closed leases 
2024-11-24T02:53:30,264 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:53:30,264 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T02:53:30,265 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:53:30,265 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:53:30,265 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:53:30,265 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:53:30,265 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:53:30,265 INFO [RS:1;7c69a60bd8f6:44053 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:44053 2024-11-24T02:53:30,270 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.FileNotFoundException: File does not exist: /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.ipc.RemoteException.instantiateException(RemoteException.java:121) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.RemoteException.unwrapRemoteException(RemoteException.java:88) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1812) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more Caused by: org.apache.hadoop.ipc.RemoteException: File does not exist: /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:87) at org.apache.hadoop.hdfs.server.namenode.INodeFile.valueOf(INodeFile.java:77) at org.apache.hadoop.hdfs.server.namenode.FSDirStatAndListingOp.isFileClosed(FSDirStatAndListingOp.java:124) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.isFileClosed(FSNamesystem.java:3502) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.isFileClosed(NameNodeRpcServer.java:1248) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.isFileClosed(ClientNamenodeProtocolServerSideTranslatorPB.java:1419) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$isFileClosed$57(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.isFileClosed(ClientNamenodeProtocolTranslatorPB.java:999) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor116.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.isFileClosed(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1810) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:30,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:53:30,311 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,44053,1732416766982 2024-11-24T02:53:30,311 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:53:30,311 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$363/0x00007fe598903a08@41e0db9a rejected from java.util.concurrent.ThreadPoolExecutor@4e25dc2[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 3] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1360) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-24T02:53:30,312 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,44053,1732416766982] 2024-11-24T02:53:30,314 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,332 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,44053,1732416766982 already deleted, retry=false 2024-11-24T02:53:30,332 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,44053,1732416766982 expired; onlineServers=1 2024-11-24T02:53:30,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,333 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,334 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,335 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,360 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:53:30,422 INFO [RS:1;7c69a60bd8f6:44053 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:53:30,422 INFO [RS:1;7c69a60bd8f6:44053 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,44053,1732416766982; zookeeper connection closed. 
2024-11-24T02:53:30,422 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:44053-0x1016ac1f8160002, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:53:30,422 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@76ceb2ee {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@76ceb2ee 2024-11-24T02:53:30,463 ERROR [WAL-Shutdown-0 {}] wal.AbstractFSWAL(2118): We have waited 5 seconds but the close of async writer doesn't complete.Please check the status of underlying filesystem or increase the wait time by the config "hbase.wal.fshlog.wait.on.shutdown.seconds" 2024-11-24T02:53:30,467 DEBUG [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs 2024-11-24T02:53:30,467 INFO [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34067%2C1732416765209.meta:.meta(num 1732416805263) 2024-11-24T02:53:30,467 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,468 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,468 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,468 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,468 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741889_1073 (size=14682) 2024-11-24T02:53:30,471 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741889_1073 (size=14682) 2024-11-24T02:53:30,474 DEBUG [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs 2024-11-24T02:53:30,474 INFO [RS:0;7c69a60bd8f6:34067 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34067%2C1732416765209:(num 1732416804945) 2024-11-24T02:53:30,474 DEBUG [RS:0;7c69a60bd8f6:34067 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:30,474 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:53:30,474 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:53:30,474 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T02:53:30,474 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:53:30,475 INFO [RS:0;7c69a60bd8f6:34067 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34067 2024-11-24T02:53:30,475 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T02:53:30,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,34067,1732416765209 2024-11-24T02:53:30,480 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:53:30,480 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:53:30,490 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,34067,1732416765209] 2024-11-24T02:53:30,500 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,34067,1732416765209 already deleted, retry=false 2024-11-24T02:53:30,501 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,34067,1732416765209 expired; onlineServers=0 2024-11-24T02:53:30,501 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,35999,1732416765056' ***** 2024-11-24T02:53:30,501 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:53:30,501 INFO [M:0;7c69a60bd8f6:35999 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:53:30,501 INFO [M:0;7c69a60bd8f6:35999 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:53:30,501 DEBUG [M:0;7c69a60bd8f6:35999 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:53:30,501 DEBUG [M:0;7c69a60bd8f6:35999 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:53:30,501 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-24T02:53:30,501 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416766123 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416766123,5,FailOnTimeoutGroup] 2024-11-24T02:53:30,501 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416766123 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416766123,5,FailOnTimeoutGroup] 2024-11-24T02:53:30,502 INFO [M:0;7c69a60bd8f6:35999 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:53:30,502 INFO [M:0;7c69a60bd8f6:35999 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:53:30,502 DEBUG [M:0;7c69a60bd8f6:35999 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:53:30,502 INFO [M:0;7c69a60bd8f6:35999 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:53:30,502 INFO [M:0;7c69a60bd8f6:35999 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:53:30,502 INFO [M:0;7c69a60bd8f6:35999 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:53:30,502 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-24T02:53:30,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:53:30,511 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:30,511 DEBUG [M:0;7c69a60bd8f6:35999 {}] zookeeper.ZKUtil(347): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:53:30,511 WARN [M:0;7c69a60bd8f6:35999 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:53:30,512 INFO [M:0;7c69a60bd8f6:35999 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/.lastflushedseqids 2024-11-24T02:53:30,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741902_1088 (size=130) 2024-11-24T02:53:30,523 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741902_1088 (size=130) 2024-11-24T02:53:30,524 INFO [M:0;7c69a60bd8f6:35999 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:53:30,525 INFO [M:0;7c69a60bd8f6:35999 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:53:30,525 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:53:30,525 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:53:30,525 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:53:30,525 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:53:30,525 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
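[Editor's note] Above, the master tries to read /hbase/master after its own znode was already deleted and gets "znode data == null", which it treats as expected during shutdown. A hypothetical sketch of the same existence-then-read check with the plain ZooKeeper client; the quorum string is taken from the log, the session timeout is an assumption:

    import org.apache.zookeeper.ZooKeeper;
    import org.apache.zookeeper.data.Stat;

    public class MasterZnodeCheck {
      public static void main(String[] args) throws Exception {
        // 127.0.0.1:59188 is the quorum the log reports; the 30s session timeout is assumed.
        ZooKeeper zk = new ZooKeeper("127.0.0.1:59188", 30_000, event -> { });
        try {
          Stat stat = zk.exists("/hbase/master", false);
          if (stat == null) {
            System.out.println("/hbase/master does not exist (not an error during shutdown)");
          } else {
            byte[] data = zk.getData("/hbase/master", false, stat);
            System.out.println("master znode data length = " + (data == null ? 0 : data.length));
          }
        } finally {
          zk.close();
        }
      }
    }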
2024-11-24T02:53:30,525 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.25 KB heapSize=29.49 KB 2024-11-24T02:53:30,547 DEBUG [M:0;7c69a60bd8f6:35999 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6ded3ed894eb4713ab880b6916eeccf9 is 82, key is hbase:meta,,1/info:regioninfo/1732416766801/Put/seqid=0 2024-11-24T02:53:30,552 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741903_1089 (size=5672) 2024-11-24T02:53:30,553 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741903_1089 (size=5672) 2024-11-24T02:53:30,553 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6ded3ed894eb4713ab880b6916eeccf9 2024-11-24T02:53:30,576 DEBUG [M:0;7c69a60bd8f6:35999 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13b5b44c085542d49066f4453a083af6 is 774, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732416767581/Put/seqid=0 2024-11-24T02:53:30,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741904_1090 (size=6255) 2024-11-24T02:53:30,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741904_1090 (size=6255) 2024-11-24T02:53:30,587 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13b5b44c085542d49066f4453a083af6 2024-11-24T02:53:30,590 INFO [RS:0;7c69a60bd8f6:34067 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:53:30,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:53:30,590 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34067-0x1016ac1f8160001, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:53:30,590 INFO [RS:0;7c69a60bd8f6:34067 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,34067,1732416765209; zookeeper connection closed. 
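[Editor's note] The flush above is the master's internal 'master:store' region writing its four column families (info, proc, rs, state) out as HFiles before closing; the master drives that flush itself. For an ordinary user table, the equivalent flush can be requested through the public Admin API. A minimal sketch, assuming a reachable cluster; "my_table" is a hypothetical name:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection connection = ConnectionFactory.createConnection(conf);
             Admin admin = connection.getAdmin()) {
          // Force the memstores of "my_table" to be written out as HFiles,
          // the user-facing counterpart of the internal flush logged above.
          admin.flush(TableName.valueOf("my_table"));
        }
      }
    }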
2024-11-24T02:53:30,590 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@12552b9b {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@12552b9b 2024-11-24T02:53:30,591 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 2 regionserver(s) complete 2024-11-24T02:53:30,595 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 13b5b44c085542d49066f4453a083af6 2024-11-24T02:53:30,616 DEBUG [M:0;7c69a60bd8f6:35999 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2693844be29b48caa8a48896cc465112 is 69, key is 7c69a60bd8f6,34067,1732416765209/rs:state/1732416766175/Put/seqid=0 2024-11-24T02:53:30,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741905_1091 (size=5224) 2024-11-24T02:53:30,628 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741905_1091 (size=5224) 2024-11-24T02:53:30,629 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=130 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2693844be29b48caa8a48896cc465112 2024-11-24T02:53:30,651 DEBUG [M:0;7c69a60bd8f6:35999 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8d422ceafc684ea38c0907359ed44aba is 52, key is load_balancer_on/state:d/1732416766958/Put/seqid=0 2024-11-24T02:53:30,658 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741906_1092 (size=5056) 2024-11-24T02:53:30,659 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741906_1092 (size=5056) 2024-11-24T02:53:30,660 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=60 (bloomFilter=true), to=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8d422ceafc684ea38c0907359ed44aba 2024-11-24T02:53:30,673 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/6ded3ed894eb4713ab880b6916eeccf9 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6ded3ed894eb4713ab880b6916eeccf9 2024-11-24T02:53:30,681 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/6ded3ed894eb4713ab880b6916eeccf9, entries=8, sequenceid=60, filesize=5.5 K 2024-11-24T02:53:30,683 DEBUG 
[M:0;7c69a60bd8f6:35999 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/13b5b44c085542d49066f4453a083af6 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/13b5b44c085542d49066f4453a083af6 2024-11-24T02:53:30,690 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 13b5b44c085542d49066f4453a083af6 2024-11-24T02:53:30,691 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/13b5b44c085542d49066f4453a083af6, entries=6, sequenceid=60, filesize=6.1 K 2024-11-24T02:53:30,692 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/2693844be29b48caa8a48896cc465112 as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2693844be29b48caa8a48896cc465112 2024-11-24T02:53:30,703 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/2693844be29b48caa8a48896cc465112, entries=2, sequenceid=60, filesize=5.1 K 2024-11-24T02:53:30,704 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/8d422ceafc684ea38c0907359ed44aba as hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8d422ceafc684ea38c0907359ed44aba 2024-11-24T02:53:30,714 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/8d422ceafc684ea38c0907359ed44aba, entries=1, sequenceid=60, filesize=4.9 K 2024-11-24T02:53:30,715 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 190ms, sequenceid=60, compaction requested=false 2024-11-24T02:53:30,727 INFO [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
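[Editor's note] Each "Committing .../.tmp/... as .../<family>/..." line above essentially amounts to an HDFS rename of a temporary flush file into the store directory, after which the file is re-opened and counted toward the store. A hedged sketch of that rename step with the generic Hadoop FileSystem API; the NameNode address matches the log, while STORE and HFILE below are shortened placeholder path segments, not the real paths:

    import java.io.IOException;
    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class CommitFlushFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46305"), conf);
        // Placeholder paths standing in for the long test-data paths in the log.
        Path tmp = new Path("/user/jenkins/test-data/STORE/.tmp/info/HFILE");
        Path dst = new Path("/user/jenkins/test-data/STORE/info/HFILE");
        if (!fs.rename(tmp, dst)) {
          throw new IOException("Failed to commit " + tmp + " as " + dst);
        }
      }
    }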
2024-11-24T02:53:30,727 DEBUG [M:0;7c69a60bd8f6:35999 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416810525Disabling compacts and flushes for region at 1732416810525Disabling writes for close at 1732416810525Obtaining lock to block concurrent updates at 1732416810525Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416810525Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23805, getHeapSize=30136, getOffHeapSize=0, getCellsCount=71 at 1732416810526 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732416810526Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416810527 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416810546 (+19 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416810546Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416810560 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416810575 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416810575Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416810595 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416810615 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416810615Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416810635 (+20 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416810650 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416810650Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3142a06c: reopening flushed file at 1732416810667 (+17 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@71ea74d2: reopening flushed file at 1732416810681 (+14 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@10abea60: reopening flushed file at 1732416810691 (+10 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@260a2a17: reopening flushed file at 1732416810703 (+12 ms)Finished flush of dataSize ~23.25 KB/23805, heapSize ~29.43 KB/30136, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 190ms, sequenceid=60, compaction requested=false at 1732416810715 (+12 ms)Writing region close event to WAL at 1732416810727 (+12 ms)Closed at 1732416810727 2024-11-24T02:53:30,728 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,728 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,728 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,728 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,728 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:30,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:39099 is added to blk_1073741888_1071 (size=1045) 2024-11-24T02:53:30,730 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:33739 is added to blk_1073741888_1071 (size=1045) 2024-11-24T02:53:30,863 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:53:30,882 WARN 
[HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,883 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,884 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,891 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:30,895 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:31,271 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:31,280 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:31,818 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@69cef80 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-450919650-172.17.0.2-1732416762899:blk_1073741830_1006, datanode=DatanodeInfoWithStorage[127.0.0.1:40699,null,null]) java.net.ConnectException: Call From 7c69a60bd8f6/172.17.0.2 to localhost:34869 failed on connection exception: java.net.ConnectException: Connection refused; For more details see: http://wiki.apache.org/hadoop/ConnectionRefused at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] 
at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:876) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.net.ConnectException: Connection refused at sun.nio.ch.Net.pollConnect(Native Method) ~[?:?] at sun.nio.ch.Net.pollConnectNow(Net.java:672) ~[?:?] at sun.nio.ch.SocketChannelImpl.finishConnect(SocketChannelImpl.java:946) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout.connect(SocketIOWithTimeout.java:205) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.connect(NetUtils.java:614) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:668) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 
12 more 2024-11-24T02:53:32,154 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/WALs/7c69a60bd8f6,35999,1732416765056/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/oldWALs/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 2024-11-24T02:53:32,157 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/MasterData/oldWALs/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768 to hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/oldWALs/7c69a60bd8f6%2C35999%2C1732416765056.1732416765768$masterlocalwal$ 2024-11-24T02:53:32,158 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:53:32,158 INFO [M:0;7c69a60bd8f6:35999 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 2024-11-24T02:53:32,158 INFO [M:0;7c69a60bd8f6:35999 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35999 2024-11-24T02:53:32,158 INFO [M:0;7c69a60bd8f6:35999 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:53:32,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-24T02:53:32,274 INFO [M:0;7c69a60bd8f6:35999 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:53:32,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:53:32,274 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:35999-0x1016ac1f8160000, quorum=127.0.0.1:59188, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:53:32,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@39eaf0e6{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:32,277 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1542e930{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:32,277 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:32,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7177a9b6{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:32,277 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61815e22{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:32,281 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:32,281 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T02:53:32,281 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-450919650-172.17.0.2-1732416762899 (Datanode Uuid 1edc601b-a53b-40fd-945a-3baf2679ddc6) service to localhost/127.0.0.1:46305 2024-11-24T02:53:32,282 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:32,281 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:40699,null,null]) java.io.InterruptedIOException: DestHost:destPort localhost:34869 , LocalHost:localPort 7c69a60bd8f6/172.17.0.2:0. Failed on local exception: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] 
at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.net.NetUtils.wrapWithMessage(NetUtils.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.NetUtils.wrapException(NetUtils.java:936) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1588) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy55.initReplicaRecovery(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.lambda$initReplicaRecovery$0(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.InterDatanodeProtocolTranslatorPB.initReplicaRecovery(InterDatanodeProtocolTranslatorPB.java:82) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.callInitReplicaRecovery(BlockRecoveryWorker.java:561) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$400(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:135) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.InterruptedIOException: Interrupted: action=RetryAction(action=RETRY, delayMillis=1000, reason=retries get failed due to exceeded maximum allowed retries number: 10), retry policy=RetryUpToMaximumCountWithFixedSleep(maxRetries=10, sleepTime=1000 MILLISECONDS) at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:963) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more Caused by: java.lang.InterruptedException: sleep interrupted at java.lang.Thread.sleep(Native Method) ~[?:?] at org.apache.hadoop.ipc.Client$Connection.handleConnectionFailure(Client.java:961) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupConnection(Client.java:691) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.setupIOstreams(Client.java:789) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client$Connection.access$3800(Client.java:364) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.getConnection(Client.java:1649) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1473) ~[hadoop-common-3.4.1.jar:?] ... 12 more 2024-11-24T02:53:32,282 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013, datanode=DatanodeInfoWithStorage[127.0.0.1:39099,null,null]) java.io.IOException: No block pool offer service for bpid=BP-450919650-172.17.0.2-1732416762899 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:32,282 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40699,null,null], DatanodeInfoWithStorage[127.0.0.1:39099,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-450919650-172.17.0.2-1732416762899:blk_1073741837_1013, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40699,null,null], DatanodeInfoWithStorage[127.0.0.1:39099,null,null]] 2024-11-24T02:53:32,282 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:40699,null,null]) java.io.IOException: No block pool offer service for bpid=BP-450919650-172.17.0.2-1732416762899 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:32,282 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$RecoveryTaskContiguous(164): Failed to recover block (block=BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010, datanode=DatanodeInfoWithStorage[127.0.0.1:39099,null,null]) java.io.IOException: No block pool offer service for bpid=BP-450919650-172.17.0.2-1732416762899 at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.getDatanodeID(BlockRecoveryWorker.java:539) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker.access$000(BlockRecoveryWorker.java:57) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$RecoveryTaskContiguous.recover(BlockRecoveryWorker.java:131) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1.run(BlockRecoveryWorker.java:602) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:32,282 WARN [org.apache.hadoop.hdfs.server.datanode.BlockRecoveryWorker$1@7e3fd6e1 {}] datanode.BlockRecoveryWorker$1(605): recover Block: RecoveringBlock{BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010; getBlockSize()=85; corrupt=false; offset=-1; locs=[DatanodeInfoWithStorage[127.0.0.1:40699,null,null], DatanodeInfoWithStorage[127.0.0.1:39099,null,null]]; cachedLocs=[]} FAILED: java.io.IOException: All datanodes failed: block=BP-450919650-172.17.0.2-1732416762899:blk_1073741834_1010, datanodeids=[DatanodeInfoWithStorage[127.0.0.1:40699,null,null], DatanodeInfoWithStorage[127.0.0.1:39099,null,null]] 2024-11-24T02:53:32,282 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data3/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:32,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:32,283 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data4/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:32,283 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:32,289 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7eb01e24{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:32,289 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4949cd53{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:32,290 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:32,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7524e7e9{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:32,290 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@69248046{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:32,291 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:32,291 WARN [BP-450919650-172.17.0.2-1732416762899 heartbeating to localhost/127.0.0.1:46305 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-450919650-172.17.0.2-1732416762899 (Datanode Uuid a76e24ca-ef22-4122-a046-e4eef85f49b8) service to localhost/127.0.0.1:46305 2024-11-24T02:53:32,292 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
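[Editor's note] The repeated RecoverLeaseFSUtils warnings above come from HBase trying to recover the lease on its WAL files after the embedded DFS client has already been shut down, so every reflective isFileClosed call fails with "Filesystem closed". Outside of that shutdown race, lease recovery against HDFS can be sketched directly with the public DistributedFileSystem API; the WAL path and the 1-second polling interval below are assumptions:

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class RecoverWalLeaseSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46305"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        Path wal = new Path("/user/jenkins/test-data/WALs/example.wal"); // hypothetical path
        // Ask the NameNode to start lease recovery, then poll until the file is closed.
        boolean recovered = dfs.recoverLease(wal);
        while (!recovered && !dfs.isFileClosed(wal)) {
          Thread.sleep(1000L); // 1s poll interval is an arbitrary choice
          recovered = dfs.recoverLease(wal);
        }
        System.out.println("lease recovered for " + wal);
      }
    }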
2024-11-24T02:53:32,292 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:32,292 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data9/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:32,292 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/cluster_96e1a053-acca-e2f8-841a-fbad74107a1b/data/data10/current/BP-450919650-172.17.0.2-1732416762899 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:32,293 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:32,298 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@94a50db{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:53:32,299 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@38184680{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:32,299 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:32,299 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1aa07d80{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:32,300 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@475f8022{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:32,309 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:53:32,349 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:53:32,364 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnDatanodeDeath Thread=154 (was 78) Potentially hanging thread: HMaster-EventLoopGroup-7-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-11 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.1@localhost:36039 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46305 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46305 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-6 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46305 from jenkins.hfs.2 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) 
Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46305 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.2@localhost:46305 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46305 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-7 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Timer for 'DataNode' metrics system java.base@17.0.11/java.lang.Object.wait(Native Method) java.base@17.0.11/java.util.TimerThread.mainLoop(Timer.java:563) java.base@17.0.11/java.util.TimerThread.run(Timer.java:516) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fe598bf52a8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-10 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46305 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: Close-WAL-Writer-0 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:175) app//org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) app//org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL$$Lambda$899/0x00007fe598bf52a8.run(Unknown Source) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) java.base@17.0.11/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46305 from jenkins.hfs.3 
java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-5 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.3@localhost:46305 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46305 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-21-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-5-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-8-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-19-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-6-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-20-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-7-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36039 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-9 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-18-2 java.base@17.0.11/java.lang.Thread.sleep(Native 
Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: ForkJoinPool-2-worker-5 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkUntil(LockSupport.java:410) java.base@17.0.11/java.util.concurrent.ForkJoinPool.awaitWork(ForkJoinPool.java:1726) java.base@17.0.11/java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1623) java.base@17.0.11/java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:165) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-8 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-9-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46305 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-21-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) 
app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=450 (was 402) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=152 (was 204), ProcessCount=11 (was 11), AvailableMemoryMB=10715 (was 10091) - AvailableMemoryMB LEAK? - 2024-11-24T02:53:32,373 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=154, OpenFileDescriptor=450, MaxFileDescriptor=1048576, SystemLoadAverage=152, ProcessCount=11, AvailableMemoryMB=10699 2024-11-24T02:53:32,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:53:32,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.log.dir so I do NOT create it in target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa 2024-11-24T02:53:32,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/683673fe-e439-de1c-55b6-a4d95fd01da2/hadoop.tmp.dir so I do NOT create it in target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa 2024-11-24T02:53:32,373 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028, deleteOnExit=true 2024-11-24T02:53:32,373 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/test.cache.data in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO 
[Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:53:32,374 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:53:32,374 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/dfs.datanode.shared.file.descriptor.paths in system properties and HBase 
conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:53:32,375 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:53:32,387 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:53:32,887 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:32,894 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:32,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:32,896 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:32,896 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:53:32,897 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:32,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6bfebe40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:32,897 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@119a3311{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:32,990 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14a79ae9{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-38917-hadoop-hdfs-3_4_1-tests_jar-_-any-16381149856221167254/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:53:32,990 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@7096145a{HTTP/1.1, (http/1.1)}{localhost:38917} 2024-11-24T02:53:32,990 INFO [Time-limited test {}] server.Server(415): Started @156731ms 2024-11-24T02:53:33,002 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:53:33,272 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:33,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:33,389 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:33,392 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:33,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:33,393 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:33,393 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:53:33,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@6fab6db5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:33,394 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@9982f0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:33,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnDatanodeDeath 2024-11-24T02:53:33,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:53:33,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:53:33,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:53:33,496 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@27ffc774{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-39307-hadoop-hdfs-3_4_1-tests_jar-_-any-3887970381698705318/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:33,497 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@13646a74{HTTP/1.1, (http/1.1)}{localhost:39307} 2024-11-24T02:53:33,497 INFO [Time-limited test {}] server.Server(415): Started @157238ms 2024-11-24T02:53:33,498 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:33,545 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:33,552 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:33,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:33,553 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:33,553 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:53:33,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@7bb583ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:33,554 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@16ec9c96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:33,656 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@5a60f964{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-46743-hadoop-hdfs-3_4_1-tests_jar-_-any-1329117943888644704/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:33,656 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@23accf28{HTTP/1.1, (http/1.1)}{localhost:46743} 2024-11-24T02:53:33,657 INFO [Time-limited test {}] server.Server(415): Started @157398ms 2024-11-24T02:53:33,658 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:34,273 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:34,283 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:53:35,275 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:35,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:35,353 WARN [Thread-1197 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data1/current/BP-491344902-172.17.0.2-1732416812399/current, will proceed with Du for space computation calculation, 2024-11-24T02:53:35,353 WARN [Thread-1198 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data2/current/BP-491344902-172.17.0.2-1732416812399/current, will proceed with Du for space computation calculation, 2024-11-24T02:53:35,371 WARN [Thread-1161 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:53:35,373 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9819aed02a44d6c with lease ID 0x4592d97a8a683c39: Processing first storage report for DS-756a0772-e916-4ae1-942d-29039e00c257 from datanode DatanodeRegistration(127.0.0.1:34497, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=44133, infoSecurePort=0, ipcPort=36493, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399) 2024-11-24T02:53:35,374 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9819aed02a44d6c with lease ID 0x4592d97a8a683c39: from storage DS-756a0772-e916-4ae1-942d-29039e00c257 node DatanodeRegistration(127.0.0.1:34497, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=44133, infoSecurePort=0, ipcPort=36493, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:35,374 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x9819aed02a44d6c with lease ID 0x4592d97a8a683c39: Processing first storage report for DS-72ef4a48-eb2c-41fb-b41c-425404791690 from datanode DatanodeRegistration(127.0.0.1:34497, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=44133, infoSecurePort=0, ipcPort=36493, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399) 2024-11-24T02:53:35,374 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x9819aed02a44d6c with lease ID 0x4592d97a8a683c39: from storage DS-72ef4a48-eb2c-41fb-b41c-425404791690 node DatanodeRegistration(127.0.0.1:34497, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=44133, infoSecurePort=0, ipcPort=36493, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:35,480 WARN [Thread-1208 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data3/current/BP-491344902-172.17.0.2-1732416812399/current, will proceed with Du for space computation calculation, 2024-11-24T02:53:35,480 WARN [Thread-1209 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data4/current/BP-491344902-172.17.0.2-1732416812399/current, will proceed with Du for space computation calculation, 2024-11-24T02:53:35,503 WARN [Thread-1184 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:53:35,506 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf742fc406c3e5fd7 with lease ID 0x4592d97a8a683c3a: Processing first storage report for DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18 from datanode DatanodeRegistration(127.0.0.1:42149, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=45363, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399) 2024-11-24T02:53:35,506 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf742fc406c3e5fd7 with lease ID 0x4592d97a8a683c3a: from storage DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18 node DatanodeRegistration(127.0.0.1:42149, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=45363, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:35,507 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xf742fc406c3e5fd7 with lease ID 0x4592d97a8a683c3a: Processing first storage report for DS-90ac1062-f4ff-435c-b59e-c8ca89c8f052 from datanode DatanodeRegistration(127.0.0.1:42149, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=45363, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399) 2024-11-24T02:53:35,507 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xf742fc406c3e5fd7 with lease ID 0x4592d97a8a683c3a: from storage DS-90ac1062-f4ff-435c-b59e-c8ca89c8f052 node DatanodeRegistration(127.0.0.1:42149, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=45363, infoSecurePort=0, ipcPort=38717, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:35,597 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa 2024-11-24T02:53:35,602 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/zookeeper_0, clientPort=59629, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:53:35,603 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=59629 2024-11-24T02:53:35,604 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,606 INFO 
[Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:53:35,624 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:53:35,625 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122 with version=8 2024-11-24T02:53:35,626 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:53:35,629 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:53:35,629 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:53:35,631 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:33509 2024-11-24T02:53:35,634 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:33509 connecting to ZooKeeper ensemble=127.0.0.1:59629 2024-11-24T02:53:35,717 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:335090x0, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:53:35,717 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:33509-0x1016ac2bd9c0000 connected 2024-11-24T02:53:35,809 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,811 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call 
to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,813 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:53:35,814 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122, hbase.cluster.distributed=false 2024-11-24T02:53:35,815 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:53:35,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=33509 2024-11-24T02:53:35,816 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=33509 2024-11-24T02:53:35,818 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=33509 2024-11-24T02:53:35,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=33509 2024-11-24T02:53:35,820 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=33509 2024-11-24T02:53:35,842 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:53:35,843 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:53:35,844 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34593 2024-11-24T02:53:35,846 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34593 connecting to ZooKeeper ensemble=127.0.0.1:59629 2024-11-24T02:53:35,847 INFO [Time-limited test {}] 
fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,848 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,862 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:345930x0, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:53:35,862 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:345930x0, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:53:35,862 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34593-0x1016ac2bd9c0001 connected 2024-11-24T02:53:35,862 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:53:35,863 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:53:35,864 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:53:35,865 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:53:35,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34593 2024-11-24T02:53:35,869 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34593 2024-11-24T02:53:35,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34593 2024-11-24T02:53:35,870 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34593 2024-11-24T02:53:35,871 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34593 2024-11-24T02:53:35,884 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:33509 2024-11-24T02:53:35,884 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:35,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:53:35,893 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:53:35,894 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Set 
watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:35,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:53:35,903 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:35,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:35,904 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:53:35,904 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,33509,1732416815628 from backup master directory 2024-11-24T02:53:35,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:53:35,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:35,914 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:53:35,914 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 
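[editor's note] The ZKUtil/ZKWatcher entries above repeatedly report "Set watcher on znode that does not yet exist" (for /hbase/running, /hbase/master, /hbase/acl) and then, once the active master creates or deletes those nodes, the corresponding NodeCreated/NodeDeleted events arrive. A minimal sketch of that pattern using the plain Apache ZooKeeper client follows; the ensemble address and znode path are copied from the log, while the class itself is purely illustrative and is not HBase's ZKUtil/ZKWatcher code.

import java.util.concurrent.CountDownLatch;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;
import org.apache.zookeeper.ZooKeeper;

/** Illustrative only: register a watch on a znode that may not exist yet (cf. /hbase/running). */
public class ZnodeWatchSketch {
  public static void main(String[] args) throws Exception {
    CountDownLatch created = new CountDownLatch(1);
    // Ensemble address taken from the log; the session timeout is an arbitrary choice here.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:59629", 30_000, event -> {
      // Connection-state notifications (type=None, state=SyncConnected) arrive on this
      // default watcher first, matching the "Received ZooKeeper Event, type=None" lines above.
    });
    Watcher nodeWatcher = (WatchedEvent event) -> {
      if (event.getType() == Watcher.Event.EventType.NodeCreated) {
        // Fires once another client creates the znode, cf. "type=NodeCreated ... path=/hbase/running".
        created.countDown();
      }
    };
    // exists() returns null for a missing znode but still registers the watch,
    // which is exactly what "Set watcher on znode that does not yet exist" refers to.
    if (zk.exists("/hbase/running", nodeWatcher) != null) {
      created.countDown(); // node was already present
    }
    created.await();
    zk.close();
  }
}

One-shot watches like this are why the log shows the watcher being re-registered after each event: every notification consumes the watch, and the observer must call exists()/getData() again to keep listening.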
2024-11-24T02:53:35,914 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:35,918 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/hbase.id] with ID: 2e7c45f8-24bc-4dbd-8f3f-30c5975cceee 2024-11-24T02:53:35,918 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/.tmp/hbase.id 2024-11-24T02:53:35,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:53:35,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:53:35,925 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/.tmp/hbase.id]:[hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/hbase.id] 2024-11-24T02:53:35,938 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:35,938 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T02:53:35,940 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
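[editor's note] The cluster ID entries above show the write-to-temp-then-rename idiom: the ID is first written to .tmp/hbase.id and then moved to hbase.id, so readers see either the old file or the complete new one, never a half-written one. Below is a rough sketch of the same idiom against the public Hadoop FileSystem API; the paths, NameNode port, and cluster ID echo the log, but this is not the actual FSUtils implementation.

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/** Illustrative write-temp-then-rename sketch, loosely mirroring how hbase.id is published. */
public class ClusterIdPublishSketch {
  static void publishClusterId(FileSystem fs, Path rootDir, String clusterId) throws Exception {
    Path tmp = new Path(rootDir, ".tmp/hbase.id");   // temporary location, as in the log
    Path dst = new Path(rootDir, "hbase.id");        // final location readers look at
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    // rename() is atomic within a directory on HDFS, so readers never observe a partial file.
    if (!fs.rename(tmp, dst)) {
      throw new java.io.IOException("rename " + tmp + " -> " + dst + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // The NameNode address below is copied from the log and only resolves inside that test run.
    try (FileSystem fs = FileSystem.get(java.net.URI.create("hdfs://localhost:37965"), conf)) {
      publishClusterId(fs,
          new Path("/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122"),
          "2e7c45f8-24bc-4dbd-8f3f-30c5975cceee");
    }
  }
}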
2024-11-24T02:53:35,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:35,953 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:35,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:53:35,963 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:53:35,964 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:53:35,964 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:53:35,965 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:53:35,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:53:35,972 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:53:35,973 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store 2024-11-24T02:53:35,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:53:35,980 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:53:36,276 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:36,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:36,381 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:53:36,381 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:53:36,381 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:53:36,381 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
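[editor's note] The two "Failed invocation" warnings above are shutdown noise from an earlier mini-cluster: RecoverLeaseFSUtils invokes DistributedFileSystem.isFileClosed reflectively, the underlying DFSClient has already been closed, and the resulting IOException("Filesystem closed") surfaces wrapped in an InvocationTargetException whose own message is null. The following self-contained sketch shows why the wrapping happens; it uses only JDK reflection, and the FakeFs class is made up for illustration rather than taken from Hadoop.

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

/** Shows how a checked exception thrown by a reflectively invoked method gets wrapped. */
public class InvocationTargetSketch {
  /** Stand-in for DistributedFileSystem; purely illustrative. */
  public static class FakeFs {
    private boolean open = false;
    public boolean isFileClosed(String path) throws IOException {
      if (!open) {
        throw new IOException("Filesystem closed");   // mirrors the closed-client check
      }
      return true;
    }
  }

  public static void main(String[] args) throws Exception {
    Method m = FakeFs.class.getMethod("isFileClosed", String.class);
    try {
      m.invoke(new FakeFs(), "/some/wal/file");
    } catch (InvocationTargetException wrapped) {
      // Reflection always wraps the target's exception; the real cause sits underneath,
      // exactly like "InvocationTargetException: null ... Caused by: IOException: Filesystem closed".
      System.out.println("wrapped: " + wrapped);
      System.out.println("cause:   " + wrapped.getCause());
    }
  }
}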
2024-11-24T02:53:36,381 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:53:36,381 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:53:36,381 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:53:36,381 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416816381Disabling compacts and flushes for region at 1732416816381Disabling writes for close at 1732416816381Writing region close event to WAL at 1732416816381Closed at 1732416816381 2024-11-24T02:53:36,382 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/.initializing 2024-11-24T02:53:36,382 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:36,385 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C33509%2C1732416815628, suffix=, logDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628, archiveDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/oldWALs, maxLogs=10 2024-11-24T02:53:36,386 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 2024-11-24T02:53:36,397 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 2024-11-24T02:53:36,400 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45363:45363),(127.0.0.1/127.0.0.1:44133:44133)] 2024-11-24T02:53:36,404 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:53:36,405 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:53:36,405 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,405 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,408 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:53:36,410 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:36,410 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,412 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:53:36,412 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:53:36,413 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, 
prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,414 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:53:36,414 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:53:36,415 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,416 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:53:36,416 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,417 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:53:36,417 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,418 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 
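[editor's note] The CompactionConfiguration lines above print the selection parameters for each column family (ratio 1.2, minFilesToCompact 3, maxFilesToCompact 10). Those parameters feed the usual ratio-based file selection: a store file stays in a candidate set only if it is not too large relative to the rest of the set. The toy sketch below demonstrates that predicate with the printed values; it is a simplification for illustration, not the actual ExploringCompactionPolicy code.

import java.util.List;

/** Toy ratio-based compaction eligibility check using the parameters printed in the log. */
public class CompactionRatioSketch {
  static final double RATIO = 1.2;   // "ratio 1.200000"
  static final int MIN_FILES = 3;    // "minFilesToCompact:3"
  static final int MAX_FILES = 10;   // "maxFilesToCompact:10"

  /** True if the candidate file sizes form an acceptable selection under the simplified rule. */
  static boolean acceptable(List<Long> sizes) {
    if (sizes.size() < MIN_FILES || sizes.size() > MAX_FILES) {
      return false;
    }
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      // Each file must be no bigger than RATIO times the sum of the *other* files,
      // which keeps one huge file from being rewritten alongside a few tiny ones.
      if (size > RATIO * (total - size)) {
        return false;
      }
    }
    return true;
  }

  public static void main(String[] args) {
    System.out.println(acceptable(List.of(10L, 12L, 11L)));   // balanced set -> true
    System.out.println(acceptable(List.of(1_000L, 5L, 5L)));  // one dominant file -> false
  }
}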
2024-11-24T02:53:36,418 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,420 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,420 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,420 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:53:36,421 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:53:36,425 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:53:36,425 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=708078, jitterRate=-0.09963344037532806}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:53:36,426 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416816405Initializing all the Stores at 1732416816406 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416816406Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416816407 (+1 ms)Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416816407Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416816407Cleaning up temporary data from old regions at 
1732416816420 (+13 ms)Region opened successfully at 1732416816426 (+6 ms) 2024-11-24T02:53:36,427 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:53:36,432 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3306fa55, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:53:36,433 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:53:36,433 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:53:36,433 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:53:36,433 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:53:36,434 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T02:53:36,434 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T02:53:36,434 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:53:36,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 
2024-11-24T02:53:36,439 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:53:36,490 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:53:36,490 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:53:36,491 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:53:36,503 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:53:36,504 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:53:36,505 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:53:36,514 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:53:36,515 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:53:36,524 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:53:36,527 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:53:36,535 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:53:36,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:53:36,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:53:36,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:36,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase 2024-11-24T02:53:36,546 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,33509,1732416815628, sessionid=0x1016ac2bd9c0000, setting cluster-up flag (Was=false) 2024-11-24T02:53:36,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:36,566 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:36,598 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:53:36,600 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:36,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:36,619 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:36,651 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:53:36,652 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:36,654 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:53:36,655 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:53:36,656 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:53:36,656 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 
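[editor's note] The StochasticLoadBalancer line above enumerates its cost functions and reports the sum of their multipliers (0.0 in this idle test cluster). Conceptually the balancer scores a candidate cluster layout as a multiplier-weighted combination of per-function costs and keeps mutations that lower the score. The sketch below is a deliberate simplification of that scoring idea: the function names echo the log, but the weights, sample costs, and normalization are illustrative assumptions, not the balancer's actual formula.

import java.util.LinkedHashMap;
import java.util.Map;

/** Toy weighted-cost scoring in the spirit of "sum of multiplier of cost functions". */
public class WeightedCostSketch {
  /** Map of cost-function name to {multiplier, cost in [0,1]}. */
  static double weightedCost(Map<String, double[]> costs) {
    double weightedSum = 0, multiplierSum = 0;
    for (double[] entry : costs.values()) {
      weightedSum += entry[0] * entry[1];
      multiplierSum += entry[0];
    }
    // If every multiplier is 0 (as in "sum of multiplier of cost functions = 0.0"),
    // there is nothing to optimize, so report a zero cost.
    return multiplierSum == 0 ? 0 : weightedSum / multiplierSum;
  }

  public static void main(String[] args) {
    Map<String, double[]> costs = new LinkedHashMap<>();
    costs.put("RegionCountSkewCostFunction", new double[] {500, 0.30});
    costs.put("MoveCostFunction",            new double[] {  7, 0.10});
    costs.put("TableSkewCostFunction",       new double[] { 35, 0.05});
    System.out.printf("overall cost = %.4f%n", weightedCost(costs));
  }
}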
2024-11-24T02:53:36,656 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,33509,1732416815628 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:53:36,658 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,659 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416846659 2024-11-24T02:53:36,659 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): 
Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,660 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:53:36,660 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:53:36,660 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:53:36,661 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,661 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:53:36,662 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:53:36,662 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:53:36,663 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416816662,5,FailOnTimeoutGroup] 2024-11-24T02:53:36,663 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small 
files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416816663,5,FailOnTimeoutGroup] 2024-11-24T02:53:36,663 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,663 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:53:36,663 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,663 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,668 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:53:36,669 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:53:36,670 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:53:36,670 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122 2024-11-24T02:53:36,673 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(746): ClusterId : 2e7c45f8-24bc-4dbd-8f3f-30c5975cceee 2024-11-24T02:53:36,673 DEBUG [RS:0;7c69a60bd8f6:34593 {}] 
procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:53:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:53:36,678 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:53:36,678 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:53:36,680 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:53:36,681 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:53:36,681 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:36,682 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:53:36,683 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:53:36,683 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:53:36,684 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 
1588230740 columnFamilyName ns 2024-11-24T02:53:36,684 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:36,684 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:53:36,686 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:53:36,686 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:36,686 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:53:36,687 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:53:36,687 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:36,688 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, 
verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:36,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:53:36,688 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740 2024-11-24T02:53:36,689 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740 2024-11-24T02:53:36,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:53:36,690 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:53:36,690 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:53:36,691 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:53:36,694 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:53:36,694 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:53:36,694 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=874660, jitterRate=0.11218787729740143}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:53:36,694 DEBUG [RS:0;7c69a60bd8f6:34593 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@36d38375, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:53:36,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416816679Initializing all the Stores at 1732416816679Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416816679Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416816680 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 
'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416816680Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416816680Cleaning up temporary data from old regions at 1732416816690 (+10 ms)Region opened successfully at 1732416816695 (+5 ms) 2024-11-24T02:53:36,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:53:36,695 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:53:36,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:53:36,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:53:36,695 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:53:36,696 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:53:36,696 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416816695Disabling compacts and flushes for region at 1732416816695Disabling writes for close at 1732416816695Writing region close event to WAL at 1732416816695Closed at 1732416816695 2024-11-24T02:53:36,697 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:53:36,697 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:53:36,697 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:53:36,698 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:53:36,700 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:53:36,711 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:34593 2024-11-24T02:53:36,711 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:53:36,711 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:53:36,712 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(832): About to register with Master. 
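The table descriptor and region-open journal records above spell out the hbase:meta column families (info, ns, rep_barrier, table) with attributes such as VERSIONS, IN_MEMORY, BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1 and BLOCKSIZE. Purely as a hedged illustration of how a descriptor with the same kind of attributes could be assembled through the public client API (the "demo" table name and single "info" family are placeholders; hbase:meta itself is created internally by the master):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaLikeDescriptorSketch {
  public static void main(String[] args) {
    // Mirrors the attributes logged for the 'info' family: VERSIONS=3, IN_MEMORY=true,
    // BLOOMFILTER=ROWCOL, DATA_BLOCK_ENCODING=ROW_INDEX_V1, BLOCKSIZE=8192.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBloomFilterType(BloomType.ROWCOL)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBlocksize(8 * 1024)
        .build();

    // "demo" is a placeholder name used only for this sketch.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(info)
        .build();

    System.out.println(td);
  }
}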
2024-11-24T02:53:36,713 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,33509,1732416815628 with port=34593, startcode=1732416815842 2024-11-24T02:53:36,713 DEBUG [RS:0;7c69a60bd8f6:34593 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:53:36,716 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:58009, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.4 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:53:36,716 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33509 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:36,717 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=33509 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:36,718 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122 2024-11-24T02:53:36,719 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:37965 2024-11-24T02:53:36,719 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:53:36,725 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:53:36,725 DEBUG [RS:0;7c69a60bd8f6:34593 {}] zookeeper.ZKUtil(111): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:36,725 WARN [RS:0;7c69a60bd8f6:34593 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:53:36,726 INFO [RS:0;7c69a60bd8f6:34593 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:53:36,726 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,34593,1732416815842] 2024-11-24T02:53:36,726 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:36,735 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:53:36,738 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:53:36,740 INFO [RS:0;7c69a60bd8f6:34593 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:53:36,740 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
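The ZKUtil and RegionServerTracker entries above show the region server announcing itself through an ephemeral znode under /hbase/rs, which the master watches for membership changes. Below is a minimal sketch of that pattern with the plain ZooKeeper client; the connect string, session timeout, member name and the assumption that /hbase/rs already exists are all illustrative, and HBase's own ZKWatcher adds retry and ACL handling on top of this:

import java.util.List;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.ZooDefs;
import org.apache.zookeeper.ZooKeeper;

public class EphemeralRegistrationSketch {
  public static void main(String[] args) throws Exception {
    // Assumed quorum address; the test cluster in this log uses 127.0.0.1:59629.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:2181", 30_000, event -> {
      // Fires for NodeChildrenChanged and similar events, as seen in the ZKWatcher lines.
      System.out.println("ZooKeeper event: " + event);
    });

    // Register as an ephemeral child: it disappears when the session dies, which is how
    // the master notices a crashed region server. Assumes the parent /hbase/rs exists.
    String member = zk.create("/hbase/rs/example-host,16020," + System.currentTimeMillis(),
        new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);

    // Watch the membership list, roughly what RegionServerTracker does on the master side.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    System.out.println("registered as " + member + "; current members: " + servers);

    zk.close();
  }
}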
2024-11-24T02:53:36,744 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:53:36,745 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:53:36,745 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,745 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,745 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,745 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:53:36,746 DEBUG [RS:0;7c69a60bd8f6:34593 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:53:36,748 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
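Each ExecutorService entry above is a fixed-size pool (core size equal to max size) dedicated to one event type, e.g. RS_OPEN_REGION with one thread and RS_SNAPSHOT_OPERATIONS with three. A rough JDK-only sketch of that shape, assuming nothing about HBase's own executor wrapper beyond the names and sizes shown in the log:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicInteger;

public class FixedEventPoolSketch {
  // Builds a named, fixed-size pool: corePoolSize == maxPoolSize, unbounded work queue.
  static ThreadPoolExecutor newEventPool(String name, int threads) {
    AtomicInteger seq = new AtomicInteger();
    return new ThreadPoolExecutor(threads, threads, 60L, TimeUnit.SECONDS,
        new LinkedBlockingQueue<>(),
        r -> new Thread(r, name + "-" + seq.incrementAndGet()));
  }

  public static void main(String[] args) throws InterruptedException {
    // Sizes taken from the log: RS_OPEN_REGION uses 1 thread, RS_SNAPSHOT_OPERATIONS uses 3.
    ThreadPoolExecutor openRegion = newEventPool("RS_OPEN_REGION", 1);
    ThreadPoolExecutor snapshotOps = newEventPool("RS_SNAPSHOT_OPERATIONS", 3);

    openRegion.submit(() -> System.out.println("open-region handler would run here"));
    snapshotOps.submit(() -> System.out.println("snapshot handler would run here"));

    openRegion.shutdown();
    snapshotOps.shutdown();
    openRegion.awaitTermination(5, TimeUnit.SECONDS);
    snapshotOps.awaitTermination(5, TimeUnit.SECONDS);
  }
}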
2024-11-24T02:53:36,748 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,748 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,749 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,749 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,749 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34593,1732416815842-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:53:36,763 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:53:36,763 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34593,1732416815842-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,763 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,763 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.Replication(171): 7c69a60bd8f6,34593,1732416815842 started 2024-11-24T02:53:36,777 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:36,777 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,34593,1732416815842, RpcServer on 7c69a60bd8f6/172.17.0.2:34593, sessionid=0x1016ac2bd9c0001 2024-11-24T02:53:36,777 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34593,1732416815842' 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:36,778 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34593,1732416815842' 2024-11-24T02:53:36,779 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:53:36,779 DEBUG 
[RS:0;7c69a60bd8f6:34593 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:53:36,779 DEBUG [RS:0;7c69a60bd8f6:34593 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:53:36,779 INFO [RS:0;7c69a60bd8f6:34593 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:53:36,779 INFO [RS:0;7c69a60bd8f6:34593 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T02:53:36,850 WARN [7c69a60bd8f6:33509 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T02:53:36,881 INFO [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34593%2C1732416815842, suffix=, logDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842, archiveDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs, maxLogs=32 2024-11-24T02:53:36,882 INFO [RS:0;7c69a60bd8f6:34593 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:36,888 INFO [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:36,889 DEBUG [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45363:45363),(127.0.0.1/127.0.0.1:44133:44133)] 2024-11-24T02:53:37,100 DEBUG [7c69a60bd8f6:33509 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:53:37,101 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:37,105 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,34593,1732416815842, state=OPENING 2024-11-24T02:53:37,154 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:53:37,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:37,167 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:53:37,168 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:53:37,168 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:53:37,168 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:53:37,168 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34593,1732416815842}]
2024-11-24T02:53:37,277 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T02:53:37,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T02:53:37,323 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:53:37,329 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:42201, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:53:37,337 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:53:37,337 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:53:37,340 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34593%2C1732416815842.meta, suffix=.meta, logDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842, archiveDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs, maxLogs=32 2024-11-24T02:53:37,341 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta 2024-11-24T02:53:37,348 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta 2024-11-24T02:53:37,350 DEBUG 
[RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45363:45363),(127.0.0.1/127.0.0.1:44133:44133)] 2024-11-24T02:53:37,351 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:53:37,351 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:53:37,352 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:53:37,352 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T02:53:37,352 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:53:37,352 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:53:37,352 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:53:37,352 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:53:37,354 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:53:37,355 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:53:37,355 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:37,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:37,356 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:53:37,357 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:53:37,357 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:37,358 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:37,358 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:53:37,359 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:53:37,359 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:37,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:37,360 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:53:37,361 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:53:37,361 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:37,362 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:53:37,362 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:53:37,364 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740 2024-11-24T02:53:37,366 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740 2024-11-24T02:53:37,368 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:53:37,368 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:53:37,369 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
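The FlushLargeStoresPolicy line above notes that hbase:meta has no per-column-family flush lower bound configured, so the policy falls back to the region's memstore flush heap size divided by the number of families (16.0 M here, with four families). Purely as an illustration of setting that bound explicitly, using the property name quoted verbatim in the log (the table and family names are placeholders and the value is arbitrary):

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class PerFamilyFlushBoundSketch {
  public static void main(String[] args) {
    // The log says the bound is read from the table descriptor, so the sketch sets it
    // there as a plain key/value; 16 MB matches the fallback the policy reports.
    TableDescriptor td = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("demo"))                       // placeholder table
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))   // placeholder family
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound",
            String.valueOf(16L * 1024 * 1024))
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}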
2024-11-24T02:53:37,371 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:53:37,373 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=739560, jitterRate=-0.059601783752441406}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:53:37,373 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:53:37,374 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416817352Writing region info on filesystem at 1732416817352Initializing all the Stores at 1732416817353 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416817354 (+1 ms)Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416817354Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416817354Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416817354Cleaning up temporary data from old regions at 1732416817368 (+14 ms)Running coprocessor post-open hooks at 1732416817373 (+5 ms)Region opened successfully at 1732416817373 2024-11-24T02:53:37,375 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416817323 2024-11-24T02:53:37,377 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:53:37,377 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:53:37,378 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:37,379 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,34593,1732416815842, state=OPEN 2024-11-24T02:53:37,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:53:37,419 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:53:37,419 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:37,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:53:37,419 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:53:37,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:53:37,422 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34593,1732416815842 in 251 msec 2024-11-24T02:53:37,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:53:37,426 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 725 msec 2024-11-24T02:53:37,427 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:53:37,427 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:53:37,428 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:53:37,428 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,34593,1732416815842, seqNum=-1] 2024-11-24T02:53:37,428 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:53:37,429 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44217, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:53:37,435 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 779 msec 2024-11-24T02:53:37,435 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416817435, completionTime=-1 2024-11-24T02:53:37,435 INFO 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:53:37,435 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:53:37,437 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:53:37,437 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416877437 2024-11-24T02:53:37,437 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732416937437 2024-11-24T02:53:37,437 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T02:53:37,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33509,1732416815628-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33509,1732416815628-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33509,1732416815628-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:33509, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,438 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,440 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.528sec 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
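Each ChoreService entry above is a periodic task identified by a name, a period and a time unit (BalancerChore every 300000 ms, HbckChore every 3600000 ms, and so on). Below is a bare-bones JDK analogue of such a chore, assuming nothing about HBase's ScheduledChore class beyond the name/period/unit shown in the log; the body is a stand-in:

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    ScheduledExecutorService chores = Executors.newSingleThreadScheduledExecutor();

    // Period copied from the BalancerChore line: 300000 ms.
    chores.scheduleAtFixedRate(
        () -> System.out.println("balancer chore tick"),
        0, 300_000, TimeUnit.MILLISECONDS);

    // Let it run briefly, then stop; a real ChoreService keeps running for the
    // lifetime of the master or region server.
    TimeUnit.SECONDS.sleep(1);
    chores.shutdownNow();
  }
}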
2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33509,1732416815628-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:53:37,442 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33509,1732416815628-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:53:37,445 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:53:37,445 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:53:37,445 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,33509,1732416815628-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:53:37,474 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@5b9f4dad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:53:37,474 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,33509,-1 for getting cluster id 2024-11-24T02:53:37,474 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:53:37,476 DEBUG [HMaster-EventLoopGroup-10-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '2e7c45f8-24bc-4dbd-8f3f-30c5975cceee' 2024-11-24T02:53:37,476 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:53:37,476 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "2e7c45f8-24bc-4dbd-8f3f-30c5975cceee" 2024-11-24T02:53:37,477 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@66e1b7a5, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:53:37,477 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,33509,-1] 2024-11-24T02:53:37,477 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:53:37,477 DEBUG [RPCClient-NioEventLoopGroup-4-12 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:53:37,479 INFO [HMaster-EventLoopGroup-10-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56294, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:53:37,479 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@156f820b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:53:37,480 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:53:37,481 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,34593,1732416815842, seqNum=-1] 2024-11-24T02:53:37,481 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:53:37,483 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-11-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:57846, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:53:37,485 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:37,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:53:37,488 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:53:37,488 INFO [Time-limited test {}] wal.TestLogRolling(320): Starting testLogRollOnPipelineRestart 2024-11-24T02:53:37,488 INFO [Time-limited test {}] wal.TestLogRolling(323): Replication=2 2024-11-24T02:53:37,489 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T02:53:37,490 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.AsyncConnectionImpl(321): The fetched master address is 7c69a60bd8f6,33509,1732416815628 2024-11-24T02:53:37,490 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@57f34273 2024-11-24T02:53:37,490 DEBUG [RPCClient-NioEventLoopGroup-4-13 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T02:53:37,492 INFO [HMaster-EventLoopGroup-10-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:56308, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T02:53:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T02:53:37,492 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T02:53:37,493 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRollOnPipelineRestart', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:53:37,494 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T02:53:37,495 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T02:53:37,495 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:37,496 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRollOnPipelineRestart" procId is: 4 2024-11-24T02:53:37,497 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:53:37,497 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T02:53:37,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741835_1011 (size=395) 2024-11-24T02:53:37,504 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741835_1011 (size=395) 2024-11-24T02:53:37,506 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 901055aa78e8d3b8af4b34b25878ad54, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRollOnPipelineRestart', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122 2024-11-24T02:53:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:42149 is added to blk_1073741836_1012 (size=78) 2024-11-24T02:53:37,513 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:34497 is added to blk_1073741836_1012 (size=78) 2024-11-24T02:53:37,514 DEBUG 
[RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:53:37,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1722): Closing 901055aa78e8d3b8af4b34b25878ad54, disabling compactions & flushes 2024-11-24T02:53:37,514 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:37,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:37,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. after waiting 0 ms 2024-11-24T02:53:37,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:37,514 INFO [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:37,514 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRollOnPipelineRestart-pool-0 {}] regionserver.HRegion(1676): Region close journal for 901055aa78e8d3b8af4b34b25878ad54: Waiting for close lock at 1732416817514Disabling compacts and flushes for region at 1732416817514Disabling writes for close at 1732416817514Writing region close event to WAL at 1732416817514Closed at 1732416817514 2024-11-24T02:53:37,516 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T02:53:37,516 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54.","families":{"info":[{"qualifier":"regioninfo","vlen":77,"tag":[],"timestamp":"1732416817516"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416817516"}]},"ts":"1732416817516"} 2024-11-24T02:53:37,519 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T02:53:37,520 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T02:53:37,521 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416817520"}]},"ts":"1732416817520"} 2024-11-24T02:53:37,523 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLING in hbase:meta 2024-11-24T02:53:37,523 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=901055aa78e8d3b8af4b34b25878ad54, ASSIGN}] 2024-11-24T02:53:37,525 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=901055aa78e8d3b8af4b34b25878ad54, ASSIGN 2024-11-24T02:53:37,525 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=901055aa78e8d3b8af4b34b25878ad54, ASSIGN; state=OFFLINE, location=7c69a60bd8f6,34593,1732416815842; forceNewPlan=false, retain=false 2024-11-24T02:53:37,677 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=901055aa78e8d3b8af4b34b25878ad54, regionState=OPENING, regionLocation=7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:37,683 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=901055aa78e8d3b8af4b34b25878ad54, ASSIGN because future has completed 2024-11-24T02:53:37,684 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 901055aa78e8d3b8af4b34b25878ad54, server=7c69a60bd8f6,34593,1732416815842}] 2024-11-24T02:53:37,848 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 
2024-11-24T02:53:37,848 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 901055aa78e8d3b8af4b34b25878ad54, NAME => 'TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:53:37,849 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRollOnPipelineRestart 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,849 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:53:37,849 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,849 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,851 INFO [StoreOpener-901055aa78e8d3b8af4b34b25878ad54-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,853 INFO [StoreOpener-901055aa78e8d3b8af4b34b25878ad54-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 901055aa78e8d3b8af4b34b25878ad54 columnFamilyName info 2024-11-24T02:53:37,853 DEBUG [StoreOpener-901055aa78e8d3b8af4b34b25878ad54-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:53:37,854 INFO [StoreOpener-901055aa78e8d3b8af4b34b25878ad54-1 {}] regionserver.HStore(327): Store=901055aa78e8d3b8af4b34b25878ad54/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:53:37,854 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,855 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,856 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,857 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,857 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,859 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,862 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:53:37,863 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 901055aa78e8d3b8af4b34b25878ad54; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=787530, jitterRate=0.0013971328735351562}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:53:37,864 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:53:37,865 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 901055aa78e8d3b8af4b34b25878ad54: Running coprocessor pre-open hook at 1732416817849Writing region info on filesystem at 1732416817849Initializing all the Stores at 1732416817851 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416817851Cleaning up temporary data from old regions at 1732416817857 (+6 ms)Running coprocessor post-open hooks at 1732416817864 (+7 ms)Region opened successfully at 1732416817865 (+1 ms) 2024-11-24T02:53:37,866 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54., pid=6, masterSystemTime=1732416817839 2024-11-24T02:53:37,869 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for 
TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:37,869 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:37,870 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=901055aa78e8d3b8af4b34b25878ad54, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,34593,1732416815842 2024-11-24T02:53:37,872 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-11-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 901055aa78e8d3b8af4b34b25878ad54, server=7c69a60bd8f6,34593,1732416815842 because future has completed 2024-11-24T02:53:37,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T02:53:37,877 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 901055aa78e8d3b8af4b34b25878ad54, server=7c69a60bd8f6,34593,1732416815842 in 190 msec 2024-11-24T02:53:37,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T02:53:37,880 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRollOnPipelineRestart, region=901055aa78e8d3b8af4b34b25878ad54, ASSIGN in 354 msec 2024-11-24T02:53:37,882 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T02:53:37,882 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRollOnPipelineRestart","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416817882"}]},"ts":"1732416817882"} 2024-11-24T02:53:37,884 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRollOnPipelineRestart, state=ENABLED in hbase:meta 2024-11-24T02:53:37,886 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T02:53:37,889 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRollOnPipelineRestart in 393 msec 2024-11-24T02:53:38,278 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:38,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:38,911 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:53:38,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,930 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,931 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,935 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:38,938 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:53:39,279 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:39,288 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:40,281 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:40,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:41,282 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:41,291 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:42,284 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:42,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:42,736 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T02:53:42,738 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRollOnPipelineRestart' 2024-11-24T02:53:43,285 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:43,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:53:43,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:53:43,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T02:53:43,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T02:53:43,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart Metrics about Tables on a single HBase RegionServer 2024-11-24T02:53:43,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:53:43,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T02:53:43,407 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:53:43,407 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T02:53:44,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:44,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:45,286 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:45,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:46,287 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:46,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:47,289 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:47,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:53:47,512 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=33509 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:53:47,513 INFO [RPCClient-NioEventLoopGroup-4-15 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRollOnPipelineRestart completed 2024-11-24T02:53:47,514 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRollOnPipelineRestart,, stopping at row=TestLogRolling-testLogRollOnPipelineRestart ,, for max=2147483647 with caching=100 2024-11-24T02:53:47,520 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T02:53:47,520 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:53:47,525 DEBUG [RPCClient-NioEventLoopGroup-4-14 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRollOnPipelineRestart', row='row1002', locateType=CURRENT is [region=TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54., hostname=7c69a60bd8f6,34593,1732416815842, seqNum=2] 2024-11-24T02:53:48,290 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:48,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:49,292 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:49,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:49,528 INFO [Time-limited test {}] wal.TestLogRolling(360): log.getCurrentFileName()): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:49,529 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:49,529 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:49,529 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:49,530 WARN [DataStreamer for file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 block BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006 {}] hdfs.DataStreamer(1731): Error Recovery for BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42149,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK], DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42149,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]) is bad. 
2024-11-24T02:53:49,530 WARN [DataStreamer for file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta block BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010 {}] hdfs.DataStreamer(1731): Error Recovery for BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42149,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK], DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42149,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]) is bad. 2024-11-24T02:53:49,530 WARN [DataStreamer for file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 block BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009 {}] hdfs.DataStreamer(1731): Error Recovery for BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009 in pipeline [DatanodeInfoWithStorage[127.0.0.1:42149,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK], DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:42149,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]) is bad. 2024-11-24T02:53:49,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:42794 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:42149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42794 dst: /127.0.0.1:42149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:42790 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:42149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42790 dst: /127.0.0.1:42149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,531 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1265829868_22 at /127.0.0.1:42780 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:42149:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:42780 dst: /127.0.0.1:42149 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:51618 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51618 dst: /127.0.0.1:34497 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:51608 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51608 dst: /127.0.0.1:34497 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,532 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1265829868_22 at /127.0.0.1:51570 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:51570 dst: /127.0.0.1:34497 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:49,597 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@5a60f964{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:49,598 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@23accf28{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:49,598 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:49,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@16ec9c96{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:49,599 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@7bb583ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:49,602 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T02:53:49,602 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:49,602 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-491344902-172.17.0.2-1732416812399 (Datanode Uuid caf16356-2220-4428-8077-79779e1d94cc) service to localhost/127.0.0.1:37965 2024-11-24T02:53:49,602 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:49,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data3/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:49,603 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data4/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:49,604 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:49,612 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:49,616 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:49,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:49,616 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:49,616 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:53:49,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1c7c7c1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:49,617 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@59e94e8a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:49,714 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@10e70018{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-46319-hadoop-hdfs-3_4_1-tests_jar-_-any-3346461579306677641/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:49,714 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@71e80301{HTTP/1.1, (http/1.1)}{localhost:46319} 2024-11-24T02:53:49,714 INFO [Time-limited test {}] server.Server(415): Started @173455ms 2024-11-24T02:53:49,716 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:49,737 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1015 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1015 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:49,737 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1014 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1014 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:49,737 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1013 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1013 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:49,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1265829868_22 at /127.0.0.1:45108 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741830_1006] {}] datanode.DataXceiver(331): 127.0.0.1:34497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45108 dst: /127.0.0.1:34497 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:49,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:45096 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741834_1010] {}] datanode.DataXceiver(331): 127.0.0.1:34497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45096 dst: /127.0.0.1:34497 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,738 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:45092 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741833_1009] {}] datanode.DataXceiver(331): 127.0.0.1:34497:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45092 dst: /127.0.0.1:34497 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] 
at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:49,744 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@27ffc774{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:49,744 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@13646a74{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:49,744 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:49,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@9982f0a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:49,745 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6fab6db5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:49,746 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:49,746 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:53:49,746 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-491344902-172.17.0.2-1732416812399 (Datanode Uuid b6bb60be-a3b0-4573-9686-b5d9cbf00864) service to localhost/127.0.0.1:37965 2024-11-24T02:53:49,746 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:49,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data1/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:49,747 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data2/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:49,747 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:49,757 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:49,759 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:49,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:49,760 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:49,760 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:53:49,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@592e51be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:49,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@3146549d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:49,859 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4307cd3{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-42849-hadoop-hdfs-3_4_1-tests_jar-_-any-3350411558267564882/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:49,859 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@17312068{HTTP/1.1, 
(http/1.1)}{localhost:42849} 2024-11-24T02:53:49,859 INFO [Time-limited test {}] server.Server(415): Started @173600ms 2024-11-24T02:53:49,861 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:50,293 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:50,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:50,666 WARN [Thread-1332 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:53:50,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e43940ae98f500b with lease ID 0x4592d97a8a683c3b: from storage DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18 node DatanodeRegistration(127.0.0.1:35169, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=44877, infoSecurePort=0, ipcPort=34927, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 6, hasStaleStorage: false, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:53:50,669 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8e43940ae98f500b with lease ID 0x4592d97a8a683c3b: from storage DS-90ac1062-f4ff-435c-b59e-c8ca89c8f052 node DatanodeRegistration(127.0.0.1:35169, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=44877, infoSecurePort=0, ipcPort=34927, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:50,930 WARN [Thread-1352 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:53:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d09bd9d577e24ac with lease ID 0x4592d97a8a683c3c: from storage DS-756a0772-e916-4ae1-942d-29039e00c257 node DatanodeRegistration(127.0.0.1:39975, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=40693, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x7d09bd9d577e24ac with lease ID 0x4592d97a8a683c3c: from storage DS-72ef4a48-eb2c-41fb-b41c-425404791690 node DatanodeRegistration(127.0.0.1:39975, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=40693, infoSecurePort=0, ipcPort=43063, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:50,987 INFO [Time-limited test {}] wal.TestLogRolling(372): Data Nodes restarted 2024-11-24T02:53:50,991 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1002 2024-11-24T02:53:50,994 ERROR [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:50,994 WARN [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:50,994 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34593%2C1732416815842:(num 1732416816882) roll requested 2024-11-24T02:53:50,995 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:51,003 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 newFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:51,003 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:51,003 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:51,003 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:51,003 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:51,004 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:51,004 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 with entries=2, filesize=1.59 KB; new WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:51,004 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:51,004 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
2024-11-24T02:53:51,004 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:51,005 WARN [IPC Server handler 1 on default port 37965 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 has not been closed. Lease recovery is in progress. RecoveryId = 1017 for block blk_1073741833_1014 2024-11-24T02:53:51,005 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 after 1ms 2024-11-24T02:53:51,011 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:44877:44877),(127.0.0.1/127.0.0.1:40693:40693)] 2024-11-24T02:53:51,011 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 is not closed yet, will try archiving it next time 2024-11-24T02:53:51,294 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:51,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:52,295 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:52,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:53,015 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1003 2024-11-24T02:53:53,296 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:53,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:54,298 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:54,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:54,671 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741833_1014: GenerationStamp not matched, existing replica is blk_1073741833_1009 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T02:53:55,006 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 after 4002ms 2024-11-24T02:53:55,018 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016 java.io.IOException: Bad response ERROR for BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016 from datanode DatanodeInfoWithStorage[127.0.0.1:39975,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1223) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:55,019 WARN [DataStreamer for file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 block BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016 {}] hdfs.DataStreamer(1731): Error Recovery for BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016 in pipeline [DatanodeInfoWithStorage[127.0.0.1:35169,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK], DatanodeInfoWithStorage[127.0.0.1:39975,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]]: datanode 1(DatanodeInfoWithStorage[127.0.0.1:39975,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]) is bad. 2024-11-24T02:53:55,019 WARN [PacketResponder: BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016, type=HAS_DOWNSTREAM_IN_PIPELINE, downstreams=1:[127.0.0.1:39975] {}] datanode.BlockReceiver$PacketResponder(1529): IOException in PacketResponder.run(): java.io.IOException: Broken pipe at sun.nio.ch.FileDispatcherImpl.write0(Native Method) ~[?:?] at sun.nio.ch.SocketDispatcher.write(SocketDispatcher.java:62) ~[?:?] at sun.nio.ch.IOUtil.writeFromNativeBuffer(IOUtil.java:132) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:97) ~[?:?] at sun.nio.ch.IOUtil.write(IOUtil.java:53) ~[?:?] at sun.nio.ch.SocketChannelImpl.write(SocketChannelImpl.java:532) ~[?:?] at org.apache.hadoop.net.SocketOutputStream$Writer.performIO(SocketOutputStream.java:62) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:158) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketOutputStream.write(SocketOutputStream.java:116) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:81) ~[?:?] at java.io.BufferedOutputStream.flush(BufferedOutputStream.java:142) ~[?:?] 
at java.io.DataOutputStream.flush(DataOutputStream.java:128) ~[?:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstreamUnprotected(BlockReceiver.java:1681) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.sendAckUpstream(BlockReceiver.java:1612) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver$PacketResponder.run(BlockReceiver.java:1520) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:55,020 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:37410 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:37410 dst: /127.0.0.1:35169 java.net.SocketException: Connection reset at sun.nio.ch.SocketChannelImpl.throwConnectionReset(SocketChannelImpl.java:394) ~[?:?] at sun.nio.ch.SocketChannelImpl.read(SocketChannelImpl.java:426) ~[?:?] at org.apache.hadoop.net.SocketInputStream$Reader.performIO(SocketInputStream.java:57) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:141) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:55,020 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:34732 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:39975:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:34732 dst: /127.0.0.1:39975 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] 
at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
2024-11-24T02:53:55,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4307cd3{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:55,050 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@17312068{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:55,050 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:55,050 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@3146549d{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:55,051 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@592e51be{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:55,052 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:55,052 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 2024-11-24T02:53:55,052 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-491344902-172.17.0.2-1732416812399 (Datanode Uuid b6bb60be-a3b0-4573-9686-b5d9cbf00864) service to localhost/127.0.0.1:37965 2024-11-24T02:53:55,052 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:55,053 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data1/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:55,053 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data2/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:55,053 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:55,065 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:55,093 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:55,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:55,093 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:55,094 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:53:55,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@730e0fff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:55,094 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@518d8d55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:55,184 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@38ca15c7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-36177-hadoop-hdfs-3_4_1-tests_jar-_-any-1905669786186798629/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:55,184 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6573e60c{HTTP/1.1, (http/1.1)}{localhost:36177} 2024-11-24T02:53:55,185 INFO [Time-limited test {}] server.Server(415): Started @178926ms 2024-11-24T02:53:55,186 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:55,209 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1018 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1018 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:55,209 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1979844436_22 at /127.0.0.1:46876 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741837_1016] {}] datanode.DataXceiver(331): 127.0.0.1:35169:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:46876 dst: /127.0.0.1:35169 java.nio.channels.ClosedChannelException: null at java.nio.channels.spi.AbstractSelectableChannel.register(AbstractSelectableChannel.java:222) ~[?:?] at java.nio.channels.SelectableChannel.register(SelectableChannel.java:260) ~[?:?] 
at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:334) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:55,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@10e70018{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:55,211 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@71e80301{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:53:55,211 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:53:55,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@59e94e8a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:53:55,211 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1c7c7c1e{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:53:55,212 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:53:55,212 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:53:55,212 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:53:55,212 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-491344902-172.17.0.2-1732416812399 (Datanode Uuid caf16356-2220-4428-8077-79779e1d94cc) service to localhost/127.0.0.1:37965 2024-11-24T02:53:55,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data3/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:55,213 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data4/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:53:55,213 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:53:55,224 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:53:55,226 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:53:55,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:53:55,227 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:53:55,227 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:53:55,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@51065df5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:53:55,228 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1bc7279c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:53:55,299 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:55,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:55,320 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2dd41fe4{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/java.io.tmpdir/jetty-localhost-35055-hadoop-hdfs-3_4_1-tests_jar-_-any-14289733828463916174/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:53:55,320 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@3420abff{HTTP/1.1, (http/1.1)}{localhost:35055} 2024-11-24T02:53:55,320 INFO [Time-limited test {}] server.Server(415): Started @179061ms 2024-11-24T02:53:55,322 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:53:55,679 WARN [Thread-1406 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:53:55,681 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4496428cf07062a with lease ID 0x4592d97a8a683c3d: from storage DS-756a0772-e916-4ae1-942d-29039e00c257 node DatanodeRegistration(127.0.0.1:43101, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=39767, infoSecurePort=0, ipcPort=35807, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:55,681 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe4496428cf07062a with lease ID 0x4592d97a8a683c3d: from storage DS-72ef4a48-eb2c-41fb-b41c-425404791690 node DatanodeRegistration(127.0.0.1:43101, datanodeUuid=b6bb60be-a3b0-4573-9686-b5d9cbf00864, infoPort=39767, infoSecurePort=0, ipcPort=35807, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:55,906 WARN [Thread-1426 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. 
Assuming default value of -1 2024-11-24T02:53:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b0969981fd662a8 with lease ID 0x4592d97a8a683c3e: from storage DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18 node DatanodeRegistration(127.0.0.1:45997, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=42857, infoSecurePort=0, ipcPort=36617, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 6, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:55,908 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x2b0969981fd662a8 with lease ID 0x4592d97a8a683c3e: from storage DS-90ac1062-f4ff-435c-b59e-c8ca89c8f052 node DatanodeRegistration(127.0.0.1:45997, datanodeUuid=caf16356-2220-4428-8077-79779e1d94cc, infoPort=42857, infoSecurePort=0, ipcPort=36617, storageInfo=lv=-57;cid=testClusterID;nsid=1111953976;c=1732416812399), blocks: 7, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:53:56,300 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:53:56,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:56,338 INFO [Time-limited test {}] wal.TestLogRolling(389): Data Nodes restarted 2024-11-24T02:53:56,343 INFO [Time-limited test {}] wal.AbstractTestLogRolling(330): Validated row row1004 2024-11-24T02:53:56,344 ERROR [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35169,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
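At this point every datanode in the write pipeline for the active WAL block has been marked bad, so the append cannot be recovered in place; the entries that follow show the region server's log roller rolling onto a fresh WAL file. A roll can also be requested explicitly through the public Admin API; a hedged sketch (the server name is copied from the log entries below, the rest is illustrative):

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.ServerName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class RollWalSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // ServerName is host,port,startcode: here the region server seen in this log
          ServerName rs = ServerName.valueOf("7c69a60bd8f6,34593,1732416815842");
          admin.rollWALWriter(rs); // closes the current WAL writer and opens a new one
        }
      }
    }
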
2024-11-24T02:53:56,345 WARN [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35169,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:56,345 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34593%2C1732416815842:(num 1732416830994) roll requested 2024-11-24T02:53:56,345 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 2024-11-24T02:53:56,354 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 newFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 2024-11-24T02:53:56,354 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:56,354 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:56,355 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:56,355 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:56,355 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:56,355 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 with entries=2, filesize=2.37 KB; new WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 2024-11-24T02:53:56,355 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35169,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
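Because the old writer could not be closed cleanly, the previous WAL file is left open on HDFS and, as the next entries show, RecoverLeaseFSUtils asks the NameNode to recover its lease and then polls until the file is closed (attempt=0 fails here; attempt=1 succeeds about 4 seconds later). A minimal sketch of that recover-and-poll pattern using only the public DistributedFileSystem calls that appear in the stack traces above; the timeout and sleep values are illustrative:

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      // Ask the NameNode to recover the lease on an abandoned WAL and wait until
      // the file is closed, or give up after the deadline.
      public static boolean recoverLease(Configuration conf, Path wal) throws Exception {
        FileSystem fs = wal.getFileSystem(conf);
        if (!(fs instanceof DistributedFileSystem)) {
          return true; // nothing to do on a non-HDFS filesystem
        }
        DistributedFileSystem dfs = (DistributedFileSystem) fs;
        boolean closed = dfs.recoverLease(wal); // attempt=0: triggers block recovery if needed
        long deadline = System.currentTimeMillis() + 60_000L;
        while (!closed && System.currentTimeMillis() < deadline) {
          Thread.sleep(1_000L); // later attempts, as in the attempt=1 line further down this log
          closed = dfs.isFileClosed(wal) || dfs.recoverLease(wal);
        }
        return closed;
      }
    }
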
2024-11-24T02:53:56,356 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:35169,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:56,356 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:56,356 WARN [IPC Server handler 0 on default port 37965 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 has not been closed. Lease recovery is in progress. RecoveryId = 1020 for block blk_1073741837_1018 2024-11-24T02:53:56,357 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 after 1ms 2024-11-24T02:53:56,364 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39767:39767),(127.0.0.1/127.0.0.1:42857:42857)] 2024-11-24T02:53:56,364 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 is not closed yet, will try archiving it next time 2024-11-24T02:53:57,301 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:57,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:53:58,302 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:58,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:58,366 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:53:58,376 DEBUG [Time-limited test {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 newFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:53:58,376 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:58,376 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:58,377 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:58,377 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:58,377 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:53:58,377 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 with entries=1, filesize=1.23 KB; new WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:53:58,379 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:42857:42857),(127.0.0.1/127.0.0.1:39767:39767)] 2024-11-24T02:53:58,379 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 is not closed yet, will try archiving it next time 2024-11-24T02:53:58,379 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 is not closed yet, will try archiving it next time 2024-11-24T02:53:58,379 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for 
hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:58,379 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:58,381 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 after 2ms 2024-11-24T02:53:58,381 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:58,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741838_1019 (size=1264) 2024-11-24T02:53:58,381 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741838_1019 (size=1264) 2024-11-24T02:53:58,392 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #3: [\x00/METAFAMILY:HBASE::REGION_EVENT::REGION_OPEN/1732416817865/Put/vlen=218/seqid=0] 2024-11-24T02:53:58,392 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #4: [row1002/info:/1732416827526/Put/vlen=1045/seqid=0] 2024-11-24T02:53:58,392 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416816882 2024-11-24T02:53:58,392 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:58,392 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:58,393 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 after 1ms 2024-11-24T02:53:58,393 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:58,396 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #5: [row1003/info:/1732416830993/Put/vlen=1045/seqid=0] 2024-11-24T02:53:58,396 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #6: [row1004/info:/1732416833016/Put/vlen=1045/seqid=0] 2024-11-24T02:53:58,396 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 2024-11-24T02:53:58,396 DEBUG [Time-limited test {}] 
wal.TestLogRolling(403): recovering lease for hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 2024-11-24T02:53:58,396 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 2024-11-24T02:53:58,397 WARN [IPC Server handler 1 on default port 37965 {}] namenode.FSNamesystem(3730): BLOCK* internalReleaseLease: All existing blocks are COMPLETE, lease removed, file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 closed. 2024-11-24T02:53:58,397 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 after 1ms 2024-11-24T02:53:58,397 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416836345 2024-11-24T02:53:58,400 DEBUG [Time-limited test {}] wal.TestLogRolling(412): #7: [row1005/info:/1732416836344/Put/vlen=1045/seqid=0] 2024-11-24T02:53:58,400 DEBUG [Time-limited test {}] wal.TestLogRolling(403): recovering lease for hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:53:58,400 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:53:58,400 WARN [IPC Server handler 3 on default port 37965 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 has not been closed. Lease recovery is in progress. RecoveryId = 1022 for block blk_1073741839_1021 2024-11-24T02:53:58,401 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 after 1ms 2024-11-24T02:53:58,783 DEBUG [Close-WAL-Writer-1 {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 is not closed yet, will try archiving it next time 2024-11-24T02:53:58,914 WARN [ResponseProcessor for block BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 {}] hdfs.DataStreamer$ResponseProcessor(1303): Exception for BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 java.io.EOFException: Unexpected EOF while trying to read response from server at org.apache.hadoop.hdfs.protocolPB.PBHelperClient.vintPrefixed(PBHelperClient.java:529) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PipelineAck.readFields(PipelineAck.java:244) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer$ResponseProcessor.run(DataStreamer.java:1180) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:58,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1265829868_22 at /127.0.0.1:38036 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:45997:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:38036 dst: /127.0.0.1:45997 java.io.InterruptedIOException: Interrupted while waiting for IO on channel java.nio.channels.SocketChannel[connected local=localhost/127.0.0.1:45997 remote=/127.0.0.1:38036]. Total timeout mills is 60000, 59462 millis timeout left. at org.apache.hadoop.net.SocketIOWithTimeout$SelectorPool.select(SocketIOWithTimeout.java:350) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketIOWithTimeout.doIO(SocketIOWithTimeout.java:156) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:161) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.net.SocketInputStream.read(SocketInputStream.java:131) ~[hadoop-common-3.4.1.jar:?] at java.io.BufferedInputStream.fill(BufferedInputStream.java:244) ~[?:?] at java.io.BufferedInputStream.read1(BufferedInputStream.java:284) ~[?:?] at java.io.BufferedInputStream.read(BufferedInputStream.java:343) ~[?:?] at java.io.DataInputStream.read(DataInputStream.java:151) ~[?:?] at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:214) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:58,914 ERROR [DataXceiver for client DFSClient_NONMAPREDUCE_1265829868_22 at /127.0.0.1:45364 [Receiving block BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021] {}] datanode.DataXceiver(331): 127.0.0.1:43101:DataXceiver error processing WRITE_BLOCK operation src: /127.0.0.1:45364 dst: /127.0.0.1:43101 java.io.IOException: Premature EOF from inputStream at org.apache.hadoop.io.IOUtils.readFully(IOUtils.java:216) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doReadFully(PacketReceiver.java:221) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.doRead(PacketReceiver.java:144) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.protocol.datatransfer.PacketReceiver.receiveNextPacket(PacketReceiver.java:119) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receivePacket(BlockReceiver.java:553) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BlockReceiver.receiveBlock(BlockReceiver.java:1011) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.writeBlock(DataXceiver.java:920) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.opWriteBlock(Receiver.java:176) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocol.datatransfer.Receiver.processOp(Receiver.java:110) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.DataXceiver.run(DataXceiver.java:299) ~[hadoop-hdfs-3.4.1.jar:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:53:58,914 WARN [DataStreamer for file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 block BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 {}] hdfs.DataStreamer(1731): Error Recovery for BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 in pipeline [DatanodeInfoWithStorage[127.0.0.1:45997,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK], DatanodeInfoWithStorage[127.0.0.1:43101,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]]: datanode 0(DatanodeInfoWithStorage[127.0.0.1:45997,DS-08c4d45c-d2b0-48ff-83f6-c0ce2279bd18,DISK]) is bad. 2024-11-24T02:53:58,918 WARN [DataStreamer for file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 block BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 {}] hdfs.DataStreamer(859): DataStreamer Exception org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:53:58,923 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741839_1022 (size=85) 2024-11-24T02:53:58,925 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741839_1022 (size=85) 2024-11-24T02:53:59,303 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:53:59,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:00,304 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:00,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:00,358 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416830994 after 4002ms 2024-11-24T02:54:00,681 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741837_1018: GenerationStamp not matched, existing replica is blk_1073741837_1016 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T02:54:01,305 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:01,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:02,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:02,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:02,401 INFO [Time-limited test {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 after 4001ms 2024-11-24T02:54:02,401 DEBUG [Time-limited test {}] wal.TestLogRolling(407): Reading WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:54:02,405 DEBUG [Time-limited test {}] wal.TestLogRolling(419): EOF reading file /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:54:02,405 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.74 KB heapSize=3.77 KB 2024-11-24T02:54:02,406 ERROR [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842.meta {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:02,406 WARN [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842.meta {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:02,406 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34593%2C1732416815842.meta:.meta(num 1732416817341) roll requested 2024-11-24T02:54:02,406 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416842406.meta 2024-11-24T02:54:02,413 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,413 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,414 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,414 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,414 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,414 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta with entries=8, filesize=2.36 KB; new WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416842406.meta 2024-11-24T02:54:02,414 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:02,414 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
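When closing the old writer fails like this, the Close-WAL-Writer thread falls back to recovering the HDFS lease on the old WAL file, which is what the RecoverLeaseFSUtils entries around this point are doing: it asks the NameNode to recover the lease and then polls whether the file has been closed. The sketch below is only an approximation of that recoverLease/isFileClosed pattern against the public DistributedFileSystem API, not the actual HBase utility; the one-second poll interval is an assumption loosely matching the timestamps in this log, and the earlier "Filesystem closed" failures happen because the underlying DFSClient had already been shut down.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public final class LeaseRecoverySketch {
  /**
   * Rough approximation of the recoverLease / isFileClosed polling that
   * RecoverLeaseFSUtils performs on an old WAL file. The real utility uses
   * longer, configurable timeouts; the 1s sleep here is a placeholder.
   */
  public static void recoverLease(Configuration conf, Path walFile) throws Exception {
    FileSystem fs = FileSystem.get(walFile.toUri(), conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return; // nothing to recover on non-HDFS filesystems
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Ask the NameNode to begin lease recovery; true means the file is already closed.
    boolean recovered = dfs.recoverLease(walFile);
    while (!recovered) {
      Thread.sleep(1000L);
      // isFileClosed is the call that fails with "Filesystem closed" in the log
      // above once the DFSClient backing this FileSystem has been closed.
      recovered = dfs.isFileClosed(walFile) || dfs.recoverLease(walFile);
    }
  }
}
```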
2024-11-24T02:54:02,415 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta 2024-11-24T02:54:02,415 WARN [IPC Server handler 3 on default port 37965 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta has not been closed. Lease recovery is in progress. RecoveryId = 1024 for block blk_1073741834_1015 2024-11-24T02:54:02,415 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta after 0ms 2024-11-24T02:54:02,416 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39767:39767),(127.0.0.1/127.0.0.1:42857:42857)] 2024-11-24T02:54:02,416 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta is not closed yet, will try archiving it next time 2024-11-24T02:54:02,431 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/info/491567b4684b4c4387bdb04c177b455f is 207, key is TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54./info:regioninfo/1732416817870/Put/seqid=0 2024-11-24T02:54:02,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741841_1025 (size=7125) 2024-11-24T02:54:02,437 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741841_1025 (size=7125) 2024-11-24T02:54:02,437 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.52 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/info/491567b4684b4c4387bdb04c177b455f 2024-11-24T02:54:02,462 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/ns/8baabbfac92f42d79c37a33f88e3007c is 43, key is default/ns:d/1732416817430/Put/seqid=0 2024-11-24T02:54:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741842_1026 (size=5153) 2024-11-24T02:54:02,467 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741842_1026 (size=5153) 2024-11-24T02:54:02,467 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), 
to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/ns/8baabbfac92f42d79c37a33f88e3007c 2024-11-24T02:54:02,484 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/table/0752883b7e5242fbab9c2f994160ff92 is 79, key is TestLogRolling-testLogRollOnPipelineRestart/table:state/1732416817882/Put/seqid=0 2024-11-24T02:54:02,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741843_1027 (size=5438) 2024-11-24T02:54:02,489 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741843_1027 (size=5438) 2024-11-24T02:54:02,489 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=150 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/table/0752883b7e5242fbab9c2f994160ff92 2024-11-24T02:54:02,495 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/info/491567b4684b4c4387bdb04c177b455f as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/info/491567b4684b4c4387bdb04c177b455f 2024-11-24T02:54:02,501 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/info/491567b4684b4c4387bdb04c177b455f, entries=10, sequenceid=11, filesize=7.0 K 2024-11-24T02:54:02,502 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/ns/8baabbfac92f42d79c37a33f88e3007c as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/ns/8baabbfac92f42d79c37a33f88e3007c 2024-11-24T02:54:02,507 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/ns/8baabbfac92f42d79c37a33f88e3007c, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T02:54:02,508 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/.tmp/table/0752883b7e5242fbab9c2f994160ff92 as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/table/0752883b7e5242fbab9c2f994160ff92 2024-11-24T02:54:02,515 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/table/0752883b7e5242fbab9c2f994160ff92, entries=2, sequenceid=11, filesize=5.3 K 2024-11-24T02:54:02,516 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~1.74 KB/1782, heapSize ~3.48 KB/3560, currentSize=0 B/0 for 1588230740 in 111ms, sequenceid=11, compaction requested=false 2024-11-24T02:54:02,516 DEBUG [Time-limited test {}] 
regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T02:54:02,516 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 901055aa78e8d3b8af4b34b25878ad54 1/1 column families, dataSize=4.20 KB heapSize=4.75 KB 2024-11-24T02:54:02,516 ERROR [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:02,517 WARN [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122-prefix:7c69a60bd8f6,34593,1732416815842 {}] wal.AbstractFSWAL(2174): append entry failed org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
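The entries that follow show the log roller reacting to these append failures by requesting a roll of the FSHLog writer. For illustration only, the same operation can be requested explicitly through the public Admin API; the sketch below is a hypothetical, standalone example, with the ZooKeeper quorum as a placeholder and the ServerName built from the region server identity (7c69a60bd8f6,34593,1732416815842) that appears in this log.

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public final class WalRollSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "localhost"); // placeholder quorum
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      // host, port, startcode as seen in the log's region server name
      ServerName rs = ServerName.valueOf("7c69a60bd8f6", 34593, 1732416815842L);
      // Ask that region server to close its current WAL writer and open a new
      // one, the same operation the log roller performs when appends fail.
      admin.rollWALWriter(rs);
    }
  }
}
```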
2024-11-24T02:54:02,517 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C34593%2C1732416815842:(num 1732416838366) roll requested 2024-11-24T02:54:02,517 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34593%2C1732416815842.1732416842517 2024-11-24T02:54:02,522 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.TestLogRolling$2(347): preLogRoll: oldFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 newFile=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416842517 2024-11-24T02:54:02,522 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,522 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,522 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,522 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,522 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,522 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416842517 2024-11-24T02:54:02,522 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] 
at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:02,523 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. org.apache.hadoop.ipc.RemoteException: Unexpected BlockUCState: BP-491344902-172.17.0.2-1732416812399:blk_1073741839_1021 is UNDER_RECOVERY but not UNDER_CONSTRUCTION at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.checkUCBlock(FSNamesystem.java:5912) at org.apache.hadoop.hdfs.server.namenode.FSNamesystem.bumpBlockGenerationStamp(FSNamesystem.java:5980) at org.apache.hadoop.hdfs.server.namenode.NameNodeRpcServer.updateBlockForPipeline(NameNodeRpcServer.java:1002) at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolServerSideTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolServerSideTranslatorPB.java:1182) at org.apache.hadoop.hdfs.protocol.proto.ClientNamenodeProtocolProtos$ClientNamenodeProtocol$2.callBlockingMethod(ClientNamenodeProtocolProtos.java) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:621) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:589) at org.apache.hadoop.ipc.ProtobufRpcEngine2$Server$ProtoBufRpcInvoker.call(ProtobufRpcEngine2.java:573) at org.apache.hadoop.ipc.RPC$Server.call(RPC.java:1227) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1246) at org.apache.hadoop.ipc.Server$RpcCall.run(Server.java:1169) at java.base/java.security.AccessController.doPrivileged(AccessController.java:712) at java.base/javax.security.auth.Subject.doAs(Subject.java:439) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1953) at org.apache.hadoop.ipc.Server$Handler.run(Server.java:3198) at org.apache.hadoop.ipc.Client.getRpcResponse(Client.java:1584) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1529) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.Client.call(Client.java:1426) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:258) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.ipc.ProtobufRpcEngine2$Invoker.invoke(ProtobufRpcEngine2.java:139) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy43.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.lambda$updateBlockForPipeline$50(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.ipc.internal.ShadedProtobufHelper.ipc(ShadedProtobufHelper.java:160) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB.updateBlockForPipeline(ClientNamenodeProtocolTranslatorPB.java:920) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invokeMethod(RetryInvocationHandler.java:437) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeMethod(RetryInvocationHandler.java:170) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invoke(RetryInvocationHandler.java:162) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler$Call.invokeOnce(RetryInvocationHandler.java:100) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.io.retry.RetryInvocationHandler.invoke(RetryInvocationHandler.java:366) ~[hadoop-common-3.4.1.jar:?] at jdk.proxy2.$Proxy44.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] 
at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at jdk.internal.reflect.GeneratedMethodAccessor114.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.fs.HFileSystem$1.invoke(HFileSystem.java:363) ~[classes/:?] at jdk.proxy2.$Proxy47.updateBlockForPipeline(Unknown Source) ~[?:?] at org.apache.hadoop.hdfs.DataStreamer.updateBlockForPipeline(DataStreamer.java:1786) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1657) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:02,523 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:54:02,523 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 after 0ms 2024-11-24T02:54:02,524 DEBUG [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39767:39767),(127.0.0.1/127.0.0.1:42857:42857)] 2024-11-24T02:54:02,525 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 to hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs/7c69a60bd8f6%2C34593%2C1732416815842.1732416838366 2024-11-24T02:54:02,540 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/.tmp/info/dc4026cc8ac94c02ae7480787f7cc45b is 1080, key is row1002/info:/1732416827526/Put/seqid=0 2024-11-24T02:54:02,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741845_1029 (size=9270) 2024-11-24T02:54:02,545 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741845_1029 (size=9270) 2024-11-24T02:54:02,546 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.20 KB at sequenceid=8 (bloomFilter=true), to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/.tmp/info/dc4026cc8ac94c02ae7480787f7cc45b 2024-11-24T02:54:02,551 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/.tmp/info/dc4026cc8ac94c02ae7480787f7cc45b as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/info/dc4026cc8ac94c02ae7480787f7cc45b 2024-11-24T02:54:02,557 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/info/dc4026cc8ac94c02ae7480787f7cc45b, entries=4, sequenceid=8, filesize=9.1 K 2024-11-24T02:54:02,559 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~4.20 KB/4304, heapSize ~4.73 KB/4848, currentSize=0 B/0 for 901055aa78e8d3b8af4b34b25878ad54 in 43ms, sequenceid=8, compaction requested=false 2024-11-24T02:54:02,559 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 901055aa78e8d3b8af4b34b25878ad54: 2024-11-24T02:54:02,564 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:54:02,564 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T02:54:02,564 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at 
org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:54:02,564 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:54:02,565 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:54:02,565 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T02:54:02,565 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:54:02,565 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1663741488, stopped=false 2024-11-24T02:54:02,565 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,33509,1732416815628 2024-11-24T02:54:02,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:54:02,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:54:02,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:02,596 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:02,596 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:54:02,596 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
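The call stack above shows the JUnit 4 tearDown path into org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster. For orientation, here is a minimal sketch of that lifecycle as a test would drive it; only HBaseTestingUtil and shutdownMiniCluster() appear in the trace, while the class name, the no-arg constructor, startMiniCluster() and the placeholder test method are assumptions added for illustration, not the real AbstractTestLogRolling code.

import org.apache.hadoop.hbase.HBaseTestingUtil;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

public class MiniClusterLifecycleSketch {
  // Hypothetical scaffold; the real AbstractTestLogRolling wires its HBaseTestingUtil differently.
  private final HBaseTestingUtil testUtil = new HBaseTestingUtil();

  @Before
  public void setUp() throws Exception {
    // Assumed call: starts the mini DFS, ZooKeeper, master and region server used by the test.
    testUtil.startMiniCluster();
  }

  @After
  public void tearDown() throws Exception {
    // Matches the trace above: closes the async connection, then the mini HBase cluster
    // and its backing services, producing the "Shutting down minicluster" lines.
    testUtil.shutdownMiniCluster();
  }

  @Test
  public void placeholder() {
    // Empty test body so the class is a runnable JUnit 4 test.
  }
}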
2024-11-24T02:54:02,597 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:54:02,597 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:54:02,597 INFO [Time-limited test {}] 
regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,34593,1732416815842' ***** 2024-11-24T02:54:02,597 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:54:02,597 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:54:02,597 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:54:02,597 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:54:02,597 INFO [RS:0;7c69a60bd8f6:34593 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:54:02,598 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(3091): Received CLOSE for 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,34593,1732416815842 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:34593. 2024-11-24T02:54:02,598 DEBUG [RS:0;7c69a60bd8f6:34593 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:54:02,598 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 901055aa78e8d3b8af4b34b25878ad54, disabling compactions & flushes 2024-11-24T02:54:02,598 DEBUG [RS:0;7c69a60bd8f6:34593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:54:02,598 INFO 
[RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:54:02,598 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:54:02,598 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. after waiting 0 ms 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:54:02,598 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:54:02,598 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:54:02,599 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T02:54:02,599 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740, 901055aa78e8d3b8af4b34b25878ad54=TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54.} 2024-11-24T02:54:02,599 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:54:02,599 DEBUG [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 901055aa78e8d3b8af4b34b25878ad54 2024-11-24T02:54:02,599 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:54:02,599 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:54:02,599 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:54:02,599 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:54:02,605 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/default/TestLogRolling-testLogRollOnPipelineRestart/901055aa78e8d3b8af4b34b25878ad54/recovered.edits/11.seqid, newMaxSeqId=11, maxSeqId=1 2024-11-24T02:54:02,606 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed 
TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:54:02,606 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 901055aa78e8d3b8af4b34b25878ad54: Waiting for close lock at 1732416842598Running coprocessor pre-close hooks at 1732416842598Disabling compacts and flushes for region at 1732416842598Disabling writes for close at 1732416842598Writing region close event to WAL at 1732416842599 (+1 ms)Running coprocessor post-close hooks at 1732416842606 (+7 ms)Closed at 1732416842606 2024-11-24T02:54:02,606 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRollOnPipelineRestart,,1732416817492.901055aa78e8d3b8af4b34b25878ad54. 2024-11-24T02:54:02,609 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T02:54:02,610 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:54:02,610 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:54:02,610 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416842599Running coprocessor pre-close hooks at 1732416842599Disabling compacts and flushes for region at 1732416842599Disabling writes for close at 1732416842599Writing region close event to WAL at 1732416842606 (+7 ms)Running coprocessor post-close hooks at 1732416842610 (+4 ms)Closed at 1732416842610 2024-11-24T02:54:02,610 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:54:02,750 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:54:02,799 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,34593,1732416815842; all regions closed. 
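The DefaultStoreFlusher/HRegionFileSystem entries earlier in this section record the flush path: memstore contents written to a temporary HFile under .tmp, then committed into the store directory and noted in the flush journal. A client can request the same flush explicitly through the Admin API; the sketch below is illustrative only (the flushes above were driven by the test and by region close, not by this call), and everything except the table name, which is taken from the log, is assumed boilerplate.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class FlushTableSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();  // picks up hbase-site.xml from the classpath
    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Asks every region of the table to flush its memstore to new store files,
      // the same memstore -> .tmp HFile -> committed store file sequence logged above.
      admin.flush(TableName.valueOf("TestLogRolling-testLogRollOnPipelineRestart"));
    }
  }
}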
2024-11-24T02:54:02,799 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,800 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,800 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,800 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,800 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:02,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741840_1023 (size=825) 2024-11-24T02:54:02,802 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741840_1023 (size=825) 2024-11-24T02:54:02,836 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: CompactionChecker was stopped 2024-11-24T02:54:02,837 INFO [regionserver/7c69a60bd8f6:0.Chore.1 {}] hbase.ScheduledChore(149): Chore: MemstoreFlusherChore was stopped 2024-11-24T02:54:03,306 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:03,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:03,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:54:03,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:54:03,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRollOnPipelineRestart 2024-11-24T02:54:03,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741834_1015: GenerationStamp not matched, existing replica is blk_1073741834_1010 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 2024-11-24T02:54:04,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:04,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:05,307 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:05,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:05,597 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T02:54:06,308 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:06,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:06,416 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta after 4001ms 2024-11-24T02:54:06,420 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/WALs/7c69a60bd8f6,34593,1732416815842/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta to hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs/7c69a60bd8f6%2C34593%2C1732416815842.meta.1732416817341.meta 2024-11-24T02:54:06,423 DEBUG [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs 2024-11-24T02:54:06,423 INFO [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34593%2C1732416815842.meta:.meta(num 1732416842406) 2024-11-24T02:54:06,423 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,423 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,424 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,424 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,424 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741844_1028 (size=1162) 2024-11-24T02:54:06,425 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741844_1028 (size=1162) 
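The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from RecoverLeaseFSUtils polling a DFS client that has already been closed while it tries to recover the lease on an old WAL; once a still-open filesystem is used, the "Recovered lease, attempt=1 ... after 4001ms" line follows. Below is a rough sketch of that recover-then-poll pattern against the public HDFS client API; it is not HBase's RecoverLeaseFSUtils implementation (which adds reflection, timeouts and progress reporting), and the path argument and one-second sleep are placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    Path wal = new Path(args[0]);            // e.g. an hdfs:// WAL path like the ones logged above
    FileSystem fs = wal.getFileSystem(conf);
    if (!(fs instanceof DistributedFileSystem)) {
      return;                                // lease recovery only applies to HDFS
    }
    DistributedFileSystem dfs = (DistributedFileSystem) fs;
    // Ask the NameNode to start lease recovery; returns true if the file is already closed.
    boolean recovered = dfs.recoverLease(wal);
    // Poll isFileClosed() until the NameNode reports the last block as finalized,
    // roughly the loop behind the "Recovered lease, attempt=N ... after Nms" lines.
    while (!recovered) {
      Thread.sleep(1000L);                   // placeholder interval
      recovered = dfs.isFileClosed(wal);
    }
  }
}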
2024-11-24T02:54:06,430 DEBUG [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(1256): Moved 4 WAL file(s) to /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs 2024-11-24T02:54:06,430 INFO [RS:0;7c69a60bd8f6:34593 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34593%2C1732416815842:(num 1732416842517) 2024-11-24T02:54:06,430 DEBUG [RS:0;7c69a60bd8f6:34593 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:54:06,430 INFO [RS:0;7c69a60bd8f6:34593 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:54:06,430 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:54:06,430 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T02:54:06,430 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:54:06,430 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:54:06,431 INFO [RS:0;7c69a60bd8f6:34593 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34593 2024-11-24T02:54:06,499 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:54:06,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:54:06,499 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,34593,1732416815842 2024-11-24T02:54:06,512 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,34593,1732416815842] 2024-11-24T02:54:06,522 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,34593,1732416815842 already deleted, retry=false 2024-11-24T02:54:06,522 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,34593,1732416815842 expired; onlineServers=0 2024-11-24T02:54:06,522 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,33509,1732416815628' ***** 2024-11-24T02:54:06,522 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:54:06,522 INFO [M:0;7c69a60bd8f6:33509 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:54:06,522 INFO [M:0;7c69a60bd8f6:33509 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:54:06,523 DEBUG [M:0;7c69a60bd8f6:33509 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:54:06,523 DEBUG [M:0;7c69a60bd8f6:33509 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:54:06,523 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T02:54:06,523 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416816663 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416816663,5,FailOnTimeoutGroup] 2024-11-24T02:54:06,523 INFO [M:0;7c69a60bd8f6:33509 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:54:06,523 INFO [M:0;7c69a60bd8f6:33509 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:54:06,523 DEBUG [M:0;7c69a60bd8f6:33509 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:54:06,523 INFO [M:0;7c69a60bd8f6:33509 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:54:06,523 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416816662 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416816662,5,FailOnTimeoutGroup] 2024-11-24T02:54:06,523 INFO [M:0;7c69a60bd8f6:33509 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:54:06,523 INFO [M:0;7c69a60bd8f6:33509 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:54:06,524 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T02:54:06,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:54:06,533 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:06,543 DEBUG [M:0;7c69a60bd8f6:33509 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/master already deleted, retry=false 2024-11-24T02:54:06,543 DEBUG [M:0;7c69a60bd8f6:33509 {}] master.ActiveMasterManager(353): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Failed delete of our master address node; KeeperErrorCode = NoNode for /hbase/master 2024-11-24T02:54:06,544 INFO [M:0;7c69a60bd8f6:33509 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/.lastflushedseqids 2024-11-24T02:54:06,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741846_1030 (size=120) 2024-11-24T02:54:06,549 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741846_1030 (size=120) 2024-11-24T02:54:06,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:54:06,612 INFO [RS:0;7c69a60bd8f6:34593 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:54:06,612 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34593-0x1016ac2bd9c0001, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:54:06,612 INFO [RS:0;7c69a60bd8f6:34593 {}] 
regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,34593,1732416815842; zookeeper connection closed. 2024-11-24T02:54:06,612 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@614e95fa {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@614e95fa 2024-11-24T02:54:06,612 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T02:54:06,950 INFO [M:0;7c69a60bd8f6:33509 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:54:06,950 INFO [M:0;7c69a60bd8f6:33509 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:54:06,950 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:54:06,950 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:06,950 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:06,950 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:54:06,951 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:06,951 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=23.18 KB heapSize=29.16 KB 2024-11-24T02:54:06,951 ERROR [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData-prefix:7c69a60bd8f6,33509,1732416815628 {}] wal.AbstractFSWAL(1838): appendAndSync throws IOException. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:06,951 WARN [FSHLog-0-hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData-prefix:7c69a60bd8f6,33509,1732416815628 {}] wal.AbstractFSWAL(2174): append entry failed java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:06,951 DEBUG [master:store-WAL-Roller {}] wal.AbstractWALRoller(198): WAL FSHLog 7c69a60bd8f6%2C33509%2C1732416815628:(num 1732416816385) roll requested 2024-11-24T02:54:06,951 INFO [master:store-WAL-Roller {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C33509%2C1732416815628.1732416846951 2024-11-24T02:54:06,958 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,958 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,958 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,958 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,958 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:06,958 INFO [master:store-WAL-Roller {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 with entries=53, filesize=26.63 KB; new WAL /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416846951 2024-11-24T02:54:06,959 WARN [Close-WAL-Writer-0 {}] wal.AbstractProtobufLogWriter(255): Failed to write trailer, non-fatal, continuing... java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 2024-11-24T02:54:06,959 WARN [Close-WAL-Writer-0 {}] wal.AbstractFSWAL(2043): close old writer failed. java.io.IOException: All datanodes [DatanodeInfoWithStorage[127.0.0.1:34497,DS-756a0772-e916-4ae1-942d-29039e00c257,DISK]] are bad. Aborting... at org.apache.hadoop.hdfs.DataStreamer.handleBadDatanode(DataStreamer.java:1721) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineInternal(DataStreamer.java:1644) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.setupPipelineForAppendOrRecovery(DataStreamer.java:1627) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.processDatanodeOrExternalError(DataStreamer.java:1408) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DataStreamer.run(DataStreamer.java:707) ~[hadoop-hdfs-client-3.4.1.jar:?] 
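The roll above ("Rolled WAL ... with entries=53, filesize=26.63 KB") was requested internally by the master:store-WAL-Roller thread after the append failure. For comparison only, a client can ask a specific region server to roll its WAL through the Admin API; this is a minimal sketch, not the code path taken here, and the server coordinates are placeholders copied from the region server name in this log.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalSketch {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // Placeholder coordinates; a real caller would usually discover them at runtime.
      ServerName server = ServerName.valueOf("7c69a60bd8f6", 34593, 1732416815842L);
      // Asks the region server to close its current WAL and open a new one,
      // the same roll-and-archive sequence the AbstractFSWAL lines in this log record.
      admin.rollWALWriter(server);
    }
  }
}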
2024-11-24T02:54:06,959 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(134): Recover lease on dfs file hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 2024-11-24T02:54:06,959 WARN [IPC Server handler 4 on default port 37965 {}] namenode.FSNamesystem(3854): DIR* NameSystem.internalReleaseLease: File /user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 has not been closed. Lease recovery is in progress. RecoveryId = 1032 for block blk_1073741830_1013 2024-11-24T02:54:06,960 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Failed to recover lease, attempt=0 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 after 1ms 2024-11-24T02:54:06,964 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39767:39767),(127.0.0.1/127.0.0.1:42857:42857)] 2024-11-24T02:54:06,964 DEBUG [master:store-WAL-Roller {}] wal.AbstractFSWAL(879): hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 is not closed yet, will try archiving it next time 2024-11-24T02:54:06,981 DEBUG [M:0;7c69a60bd8f6:33509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10aacef814754e1a86e7458806ebb624 is 82, key is hbase:meta,,1/info:regioninfo/1732416817378/Put/seqid=0 2024-11-24T02:54:06,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741848_1033 (size=5672) 2024-11-24T02:54:06,986 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741848_1033 (size=5672) 2024-11-24T02:54:06,987 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10aacef814754e1a86e7458806ebb624 2024-11-24T02:54:07,010 DEBUG [M:0;7c69a60bd8f6:33509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/57a635d10f4d4b0d8467bb307274c644 is 779, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732416817888/Put/seqid=0 2024-11-24T02:54:07,016 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741849_1034 (size=6119) 2024-11-24T02:54:07,019 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741849_1034 (size=6119) 2024-11-24T02:54:07,019 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=22.58 KB at sequenceid=56 (bloomFilter=true), 
to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/57a635d10f4d4b0d8467bb307274c644 2024-11-24T02:54:07,043 DEBUG [M:0;7c69a60bd8f6:33509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/060d2f31be0c43959244597e9beca7ea is 69, key is 7c69a60bd8f6,34593,1732416815842/rs:state/1732416816717/Put/seqid=0 2024-11-24T02:54:07,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741850_1035 (size=5156) 2024-11-24T02:54:07,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741850_1035 (size=5156) 2024-11-24T02:54:07,048 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/060d2f31be0c43959244597e9beca7ea 2024-11-24T02:54:07,069 DEBUG [M:0;7c69a60bd8f6:33509 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/316a64e2a7bf4dbd94a9b7fb1902898d is 52, key is load_balancer_on/state:d/1732416817487/Put/seqid=0 2024-11-24T02:54:07,079 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741851_1036 (size=5056) 2024-11-24T02:54:07,080 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=56 (bloomFilter=true), to=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/316a64e2a7bf4dbd94a9b7fb1902898d 2024-11-24T02:54:07,080 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741851_1036 (size=5056) 2024-11-24T02:54:07,086 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/10aacef814754e1a86e7458806ebb624 as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10aacef814754e1a86e7458806ebb624 2024-11-24T02:54:07,092 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/10aacef814754e1a86e7458806ebb624, entries=8, sequenceid=56, filesize=5.5 K 2024-11-24T02:54:07,093 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/57a635d10f4d4b0d8467bb307274c644 as 
hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/57a635d10f4d4b0d8467bb307274c644 2024-11-24T02:54:07,105 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/57a635d10f4d4b0d8467bb307274c644, entries=6, sequenceid=56, filesize=6.0 K 2024-11-24T02:54:07,106 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/060d2f31be0c43959244597e9beca7ea as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/060d2f31be0c43959244597e9beca7ea 2024-11-24T02:54:07,111 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/060d2f31be0c43959244597e9beca7ea, entries=1, sequenceid=56, filesize=5.0 K 2024-11-24T02:54:07,112 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/316a64e2a7bf4dbd94a9b7fb1902898d as hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/316a64e2a7bf4dbd94a9b7fb1902898d 2024-11-24T02:54:07,117 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/316a64e2a7bf4dbd94a9b7fb1902898d, entries=1, sequenceid=56, filesize=4.9 K 2024-11-24T02:54:07,118 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(3140): Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=56, compaction requested=false 2024-11-24T02:54:07,121 INFO [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:07,121 DEBUG [M:0;7c69a60bd8f6:33509 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416846950Disabling compacts and flushes for region at 1732416846950Disabling writes for close at 1732416846951 (+1 ms)Obtaining lock to block concurrent updates at 1732416846951Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416846951Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=23738, getHeapSize=29800, getOffHeapSize=0, getCellsCount=67 at 1732416846951Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
at 1732416846965 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416846965Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416846981 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416846981Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416846992 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416847010 (+18 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416847010Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416847027 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416847042 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416847043 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416847054 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416847069 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416847069Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@32a8c99: reopening flushed file at 1732416847085 (+16 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@20fb1d21: reopening flushed file at 1732416847092 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@43162734: reopening flushed file at 1732416847105 (+13 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6bb573b2: reopening flushed file at 1732416847111 (+6 ms)Finished flush of dataSize ~23.18 KB/23738, heapSize ~29.10 KB/29800, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 167ms, sequenceid=56, compaction requested=false at 1732416847118 (+7 ms)Writing region close event to WAL at 1732416847121 (+3 ms)Closed at 1732416847121 2024-11-24T02:54:07,122 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:07,122 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:07,122 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:07,122 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:07,122 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:07,124 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:45997 is added to blk_1073741847_1031 (size=757) 2024-11-24T02:54:07,125 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:43101 is added to blk_1073741847_1031 (size=757) 2024-11-24T02:54:07,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:07,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:07,606 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,607 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,624 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,625 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,628 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,629 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,630 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:07,636 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,139 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:54:08,140 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,141 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,161 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,162 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,167 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,168 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,172 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:08,309 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:08,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:09,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:09,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:09,909 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1452): Error processing datanode Command java.io.IOException: Failed to delete 1 (out of 1) replica(s): 0) Failed to delete replica blk_1073741830_1013: GenerationStamp not matched, existing replica is blk_1073741830_1006 at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2389) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl.invalidate(FsDatasetImpl.java:2317) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActive(BPOfferService.java:743) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPOfferService.processCommandFromActor(BPOfferService.java:692) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processCommand(BPServiceActor.java:1439) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.lambda$enqueue$2(BPServiceActor.java:1485) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.processQueue(BPServiceActor.java:1412) ~[hadoop-hdfs-3.4.1.jar:?] at org.apache.hadoop.hdfs.server.datanode.BPServiceActor$CommandProcessingThread.run(BPServiceActor.java:1395) ~[hadoop-hdfs-3.4.1.jar:?] 
2024-11-24T02:54:10,310 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:10,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:10,960 INFO [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(223): Recovered lease, attempt=1 on file=hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 after 4001ms 2024-11-24T02:54:10,961 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/WALs/7c69a60bd8f6,33509,1732416815628/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 to hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/oldWALs/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 2024-11-24T02:54:10,964 INFO [WAL-Archive-0 {}] region.MasterRegionUtils(50): Moved hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/MasterData/oldWALs/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385 to hdfs://localhost:37965/user/jenkins/test-data/8b493569-9e30-7944-eeee-609259597122/oldWALs/7c69a60bd8f6%2C33509%2C1732416815628.1732416816385$masterlocalwal$ 2024-11-24T02:54:10,964 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:54:10,964 INFO [M:0;7c69a60bd8f6:33509 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T02:54:10,964 INFO [M:0;7c69a60bd8f6:33509 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:33509 2024-11-24T02:54:10,964 INFO [M:0;7c69a60bd8f6:33509 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:54:11,125 INFO [M:0;7c69a60bd8f6:33509 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:54:11,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:54:11,125 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:33509-0x1016ac2bd9c0000, quorum=127.0.0.1:59629, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:54:11,129 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2dd41fe4{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:54:11,129 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@3420abff{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:54:11,129 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:54:11,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1bc7279c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:54:11,130 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@51065df5{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:54:11,133 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:54:11,133 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:54:11,133 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-491344902-172.17.0.2-1732416812399 (Datanode Uuid caf16356-2220-4428-8077-79779e1d94cc) service to localhost/127.0.0.1:37965 2024-11-24T02:54:11,133 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:54:11,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data3/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:54:11,134 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data4/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:54:11,134 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:54:11,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@38ca15c7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:54:11,141 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6573e60c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:54:11,141 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:54:11,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@518d8d55{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:54:11,141 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@730e0fff{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:54:11,145 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:54:11,145 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:54:11,145 WARN [BP-491344902-172.17.0.2-1732416812399 heartbeating to localhost/127.0.0.1:37965 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-491344902-172.17.0.2-1732416812399 (Datanode Uuid b6bb60be-a3b0-4573-9686-b5d9cbf00864) service to localhost/127.0.0.1:37965 2024-11-24T02:54:11,145 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:54:11,145 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data1/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:54:11,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/cluster_f814bfe8-3e23-670a-ab8d-c75e842ed028/data/data2/current/BP-491344902-172.17.0.2-1732416812399 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:54:11,146 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:54:11,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14a79ae9{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:54:11,152 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@7096145a{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:54:11,152 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:54:11,152 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@119a3311{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:54:11,153 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@6bfebe40{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir/,STOPPED} 2024-11-24T02:54:11,161 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:54:11,188 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:54:11,197 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnPipelineRestart Thread=179 (was 154) Potentially hanging thread: nioEventLoopGroup-31-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:37965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-31-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37965 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-30-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37965 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-31-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-14 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.4@localhost:37965 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-12 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) 
java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-32-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-30-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-11-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:37965 from jenkins.hfs.4 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-13 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-10-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-15 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:37965 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-33-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=457 (was 450) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=190 (was 152) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9945 (was 10699) 2024-11-24T02:54:11,206 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=179, OpenFileDescriptor=457, MaxFileDescriptor=1048576, SystemLoadAverage=190, ProcessCount=11, AvailableMemoryMB=9944 2024-11-24T02:54:11,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:54:11,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.log.dir so I do NOT create it in target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca 2024-11-24T02:54:11,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/fd3235ca-6d75-8964-ab84-0c60620a90fa/hadoop.tmp.dir so I do NOT create it in target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca 2024-11-24T02:54:11,207 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd, deleteOnExit=true 2024-11-24T02:54:11,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:54:11,207 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/test.cache.data in system properties and HBase conf 2024-11-24T02:54:11,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:54:11,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir in system properties and HBase conf 2024-11-24T02:54:11,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T02:54:11,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:54:11,208 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:54:11,208 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:54:11,209 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T02:54:11,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:54:11,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:54:11,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:54:11,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:54:11,210 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:54:11,230 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:54:11,311 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:11,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:11,671 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:54:11,675 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:54:11,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:54:11,676 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:54:11,676 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:54:11,677 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:54:11,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:54:11,678 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:54:11,783 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@6d483d07{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/java.io.tmpdir/jetty-localhost-44861-hadoop-hdfs-3_4_1-tests_jar-_-any-12921303016432004793/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:54:11,783 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:44861} 2024-11-24T02:54:11,784 INFO [Time-limited test {}] server.Server(415): Started @195525ms 2024-11-24T02:54:11,797 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:54:12,051 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:54:12,056 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:54:12,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:54:12,060 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:54:12,060 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:54:12,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:54:12,062 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:54:12,169 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@43d16ee8{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/java.io.tmpdir/jetty-localhost-46421-hadoop-hdfs-3_4_1-tests_jar-_-any-12545422569843830420/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:54:12,170 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:46421} 2024-11-24T02:54:12,170 INFO [Time-limited test {}] server.Server(415): Started @195911ms 2024-11-24T02:54:12,171 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:54:12,205 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:54:12,209 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:54:12,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:54:12,209 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:54:12,209 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:54:12,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:54:12,210 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:54:12,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] 
... 11 more 2024-11-24T02:54:12,315 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@2526c219{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/java.io.tmpdir/jetty-localhost-45233-hadoop-hdfs-3_4_1-tests_jar-_-any-12338900178133903984/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:54:12,316 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:45233} 2024-11-24T02:54:12,316 INFO [Time-limited test {}] server.Server(415): Started @196057ms 2024-11-24T02:54:12,317 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:54:12,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:13,193 WARN [Thread-1646 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data1/current/BP-1016711101-172.17.0.2-1732416851243/current, will proceed with Du for space computation calculation, 2024-11-24T02:54:13,193 WARN [Thread-1647 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data2/current/BP-1016711101-172.17.0.2-1732416851243/current, will proceed with Du for space computation calculation, 2024-11-24T02:54:13,217 WARN [Thread-1610 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:54:13,219 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99e634712f4fa54c with lease ID 0xb10f16efd1876452: Processing first storage report for DS-1684ab97-5e5e-4e98-9d6e-2b25cffbf643 from datanode DatanodeRegistration(127.0.0.1:40219, datanodeUuid=8d1aa9a2-99b0-4763-af41-8864caeeda8e, infoPort=46145, infoSecurePort=0, ipcPort=43817, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243) 2024-11-24T02:54:13,219 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99e634712f4fa54c with lease ID 0xb10f16efd1876452: from storage DS-1684ab97-5e5e-4e98-9d6e-2b25cffbf643 node DatanodeRegistration(127.0.0.1:40219, datanodeUuid=8d1aa9a2-99b0-4763-af41-8864caeeda8e, infoPort=46145, infoSecurePort=0, ipcPort=43817, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:54:13,220 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x99e634712f4fa54c with lease ID 0xb10f16efd1876452: Processing first storage report for DS-09ad0084-3ecb-4f76-977e-89afc612b57f from datanode DatanodeRegistration(127.0.0.1:40219, datanodeUuid=8d1aa9a2-99b0-4763-af41-8864caeeda8e, infoPort=46145, infoSecurePort=0, ipcPort=43817, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243) 2024-11-24T02:54:13,220 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x99e634712f4fa54c with lease ID 0xb10f16efd1876452: from storage DS-09ad0084-3ecb-4f76-977e-89afc612b57f node DatanodeRegistration(127.0.0.1:40219, datanodeUuid=8d1aa9a2-99b0-4763-af41-8864caeeda8e, infoPort=46145, infoSecurePort=0, ipcPort=43817, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:54:13,312 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:13,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:13,399 WARN [Thread-1658 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data4/current/BP-1016711101-172.17.0.2-1732416851243/current, will proceed with Du for space computation calculation, 2024-11-24T02:54:13,399 WARN [Thread-1657 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data3/current/BP-1016711101-172.17.0.2-1732416851243/current, will proceed with Du for space computation calculation, 2024-11-24T02:54:13,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:54:13,418 WARN [Thread-1633 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:54:13,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa86139dc2a8f4fa with lease ID 0xb10f16efd1876453: Processing first storage report for DS-9af4aff1-187f-4755-abe8-01a32c93aa29 from datanode DatanodeRegistration(127.0.0.1:37711, datanodeUuid=48e32b1b-ba20-40de-b21a-fc952036af1b, infoPort=45011, infoSecurePort=0, ipcPort=41653, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243) 2024-11-24T02:54:13,420 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa86139dc2a8f4fa with lease ID 0xb10f16efd1876453: from storage DS-9af4aff1-187f-4755-abe8-01a32c93aa29 node DatanodeRegistration(127.0.0.1:37711, datanodeUuid=48e32b1b-ba20-40de-b21a-fc952036af1b, infoPort=45011, infoSecurePort=0, ipcPort=41653, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:54:13,420 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xfa86139dc2a8f4fa with lease ID 0xb10f16efd1876453: Processing first storage report for DS-ac6c6efd-19f4-4970-b63c-59b87eeef063 from datanode DatanodeRegistration(127.0.0.1:37711, datanodeUuid=48e32b1b-ba20-40de-b21a-fc952036af1b, infoPort=45011, infoSecurePort=0, ipcPort=41653, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243) 2024-11-24T02:54:13,420 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xfa86139dc2a8f4fa with lease ID 0xb10f16efd1876453: from storage DS-ac6c6efd-19f4-4970-b63c-59b87eeef063 node DatanodeRegistration(127.0.0.1:37711, datanodeUuid=48e32b1b-ba20-40de-b21a-fc952036af1b, infoPort=45011, infoSecurePort=0, ipcPort=41653, storageInfo=lv=-57;cid=testClusterID;nsid=851184135;c=1732416851243), 
blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:54:13,462 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca 2024-11-24T02:54:13,464 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/zookeeper_0, clientPort=57226, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:54:13,465 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=57226 2024-11-24T02:54:13,465 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,467 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,479 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:54:13,480 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:54:13,481 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396 with version=8 2024-11-24T02:54:13,481 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:54:13,483 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:54:13,483 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:54:13,483 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:54:13,483 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:54:13,483 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, 
maxQueueLength=30, handlerCount=3 2024-11-24T02:54:13,483 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:54:13,484 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:54:13,484 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:54:13,484 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:45433 2024-11-24T02:54:13,487 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:45433 connecting to ZooKeeper ensemble=127.0.0.1:57226 2024-11-24T02:54:13,537 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:454330x0, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:54:13,538 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:45433-0x1016ac351830000 connected 2024-11-24T02:54:13,617 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,619 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,621 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:54:13,622 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396, hbase.cluster.distributed=false 2024-11-24T02:54:13,623 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:54:13,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=45433 2024-11-24T02:54:13,628 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=45433 2024-11-24T02:54:13,631 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=45433 2024-11-24T02:54:13,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=45433 2024-11-24T02:54:13,632 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=45433 2024-11-24T02:54:13,648 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:54:13,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; 
numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:54:13,648 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:54:13,649 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:54:13,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:54:13,649 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:54:13,649 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:54:13,649 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:54:13,650 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41931 2024-11-24T02:54:13,651 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:41931 connecting to ZooKeeper ensemble=127.0.0.1:57226 2024-11-24T02:54:13,652 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,654 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,669 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:419310x0, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:54:13,669 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:419310x0, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:54:13,669 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:41931-0x1016ac351830001 connected 2024-11-24T02:54:13,670 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:54:13,670 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:54:13,671 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:54:13,672 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:54:13,672 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41931 2024-11-24T02:54:13,672 DEBUG [Time-limited test {}] 
ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41931 2024-11-24T02:54:13,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41931 2024-11-24T02:54:13,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41931 2024-11-24T02:54:13,673 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41931 2024-11-24T02:54:13,685 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:45433 2024-11-24T02:54:13,685 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:13,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:54:13,690 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:54:13,691 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:13,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:54:13,700 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,701 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:54:13,701 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,45433,1732416853483 from backup master directory 2024-11-24T02:54:13,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:13,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:54:13,711 WARN [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:54:13,711 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:54:13,711 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:13,721 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/hbase.id] with ID: dd52418b-56ea-4e09-93ab-53a8786b0c2d 2024-11-24T02:54:13,721 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/.tmp/hbase.id 2024-11-24T02:54:13,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:54:13,729 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:54:13,730 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/.tmp/hbase.id]:[hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/hbase.id] 2024-11-24T02:54:13,742 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:13,742 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T02:54:13,744 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 2ms. 
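The FSUtils entries just above show the active master publishing hbase.id by first writing it under a .tmp location and then moving it onto the final path. A minimal sketch of that write-then-rename pattern on the Hadoop FileSystem API follows; it is illustrative only, and the class, method and path names in it are invented for the example rather than taken from HBase's FSUtils:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.nio.charset.StandardCharsets;

public class ClusterIdPublishSketch {

  // Write the content to a temporary file, then rename it over the target path,
  // mirroring the "write to .tmp, then move into place" sequence in the log above.
  static void publish(FileSystem fs, Path finalPath, String clusterId) throws IOException {
    Path tmp = new Path(finalPath.getParent(), ".tmp-" + finalPath.getName()); // hypothetical temp name
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }
    if (!fs.rename(tmp, finalPath)) {  // the rename publishes the file in a single step
      fs.delete(tmp, false);           // clean up the staging file if publication failed
      throw new IOException("rename of " + tmp + " to " + finalPath + " failed");
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration(); // in the test this would point at the mini-DFS namenode
    FileSystem fs = FileSystem.get(conf);
    publish(fs, new Path("/tmp/hbase.id.example"), java.util.UUID.randomUUID().toString());
  }
}

Because the rename is a single namespace operation, a reader either sees the previous file or the fully written new one, which is why the cluster ID is staged in .tmp rather than written in place.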
2024-11-24T02:54:13,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,753 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:54:13,770 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:54:13,771 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:54:13,771 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:54:13,772 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:54:13,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:54:13,794 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:54:13,795 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store 2024-11-24T02:54:13,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:54:13,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:54:13,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:54:13,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:54:13,806 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:13,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:13,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:54:13,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:54:13,806 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
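The master:store descriptor logged above (families info, proc, rs and state with their VERSIONS, BLOOMFILTER, DATA_BLOCK_ENCODING, IN_MEMORY and BLOCKSIZE attributes) is assembled internally by MasterRegion, but the same shape can be expressed with the public ColumnFamilyDescriptorBuilder/TableDescriptorBuilder API. The sketch below mirrors only the info and proc families from the log; it is illustrative, builds the descriptor in memory without creating anything, and is not the code path HBase itself uses.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
    import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
    import org.apache.hadoop.hbase.regionserver.BloomType;
    import org.apache.hadoop.hbase.util.Bytes;

    public class StoreDescriptorSketch {
      public static void main(String[] args) {
        // 'info' family as logged: 3 versions, ROW_INDEX_V1 encoding, ROWCOL bloom,
        // in-memory, 8 KB blocks.
        ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("info"))
            .setMaxVersions(3)
            .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
            .setBloomFilterType(BloomType.ROWCOL)
            .setInMemory(true)
            .setBlocksize(8 * 1024)
            .build();

        // 'proc' family as logged: 1 version, ROW bloom, 64 KB blocks, not in-memory.
        ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
            .newBuilder(Bytes.toBytes("proc"))
            .setMaxVersions(1)
            .setBloomFilterType(BloomType.ROW)
            .setBlocksize(64 * 1024)
            .build();

        TableDescriptor store = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("master", "store"))
            .setColumnFamily(info)
            .setColumnFamily(proc)
            .build();
        System.out.println(store);
      }
    }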
2024-11-24T02:54:13,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416853806Disabling compacts and flushes for region at 1732416853806Disabling writes for close at 1732416853806Writing region close event to WAL at 1732416853806Closed at 1732416853806 2024-11-24T02:54:13,807 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/.initializing 2024-11-24T02:54:13,808 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/WALs/7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:13,810 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C45433%2C1732416853483, suffix=, logDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/WALs/7c69a60bd8f6,45433,1732416853483, archiveDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/oldWALs, maxLogs=10 2024-11-24T02:54:13,810 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C45433%2C1732416853483.1732416853810 2024-11-24T02:54:13,815 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/WALs/7c69a60bd8f6,45433,1732416853483/7c69a60bd8f6%2C45433%2C1732416853483.1732416853810 2024-11-24T02:54:13,815 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46145:46145),(127.0.0.1/127.0.0.1:45011:45011)] 2024-11-24T02:54:13,816 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:54:13,816 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:54:13,816 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,816 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,818 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,819 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:54:13,819 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:13,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:13,820 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,821 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:54:13,821 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:13,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:54:13,822 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,823 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:54:13,823 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:13,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:54:13,824 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:54:13,826 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:13,826 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:54:13,826 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,827 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,828 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,829 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,829 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,830 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:54:13,831 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:54:13,833 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:54:13,833 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=865256, jitterRate=0.10023011267185211}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:54:13,834 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416853816Initializing all the Stores at 1732416853817 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416853817Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416853817Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416853817Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416853817Cleaning up temporary data from old regions at 1732416853829 (+12 ms)Region opened successfully at 1732416853834 (+5 ms) 2024-11-24T02:54:13,834 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:54:13,836 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7433702b, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:54:13,837 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:54:13,838 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:54:13,838 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:54:13,838 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:54:13,838 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T02:54:13,838 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T02:54:13,839 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:54:13,840 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T02:54:13,841 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:54:13,848 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:54:13,849 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:54:13,850 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:54:13,858 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:54:13,859 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:54:13,860 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:54:13,869 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:54:13,870 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:54:13,879 DEBUG 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:54:13,881 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:54:13,890 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:54:13,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:54:13,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:54:13,901 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,901 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,45433,1732416853483, sessionid=0x1016ac351830000, setting cluster-up flag (Was=false) 2024-11-24T02:54:13,921 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,922 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,953 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:54:13,954 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:13,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:13,974 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:14,006 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:54:14,007 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:14,008 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:54:14,009 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:54:14,009 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:54:14,010 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T02:54:14,010 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,45433,1732416853483 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:54:14,011 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416884013 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:54:14,013 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:54:14,013 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:54:14,013 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 
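The ZKWatcher lines throughout this section report NodeCreated, NodeDeleted and NodeChildrenChanged events for znodes such as /hbase/running, /hbase/master and /hbase/backup-masters. The minimal stand-alone sketch below uses the plain ZooKeeper client to register the same kinds of one-shot watches and print events in the same "type=..., state=..., path=..." shape; the quorum address is taken from the log, and it assumes the test cluster's ZooKeeper and its /hbase znodes are still reachable.

    import java.util.concurrent.CountDownLatch;

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class ZkEventSketch {
      public static void main(String[] args) throws Exception {
        CountDownLatch connected = new CountDownLatch(1);
        ZooKeeper zk = new ZooKeeper("127.0.0.1:57226", 30_000, (WatchedEvent e) -> {
          if (e.getState() == Watcher.Event.KeeperState.SyncConnected) {
            connected.countDown();
          }
          System.out.println("Received ZooKeeper Event, type=" + e.getType()
              + ", state=" + e.getState() + ", path=" + e.getPath());
        });
        connected.await();

        // exists() with watch=true registers a one-shot watch; a later NodeCreated or
        // NodeDeleted on this path is delivered to the callback above.
        zk.exists("/hbase/running", true);
        // getChildren() with watch=true yields NodeChildrenChanged events; this assumes
        // the znode exists, as it does in the run logged here.
        zk.getChildren("/hbase/backup-masters", true);

        Thread.sleep(10_000); // give watches a chance to fire before closing
        zk.close();
      }
    }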
2024-11-24T02:54:14,014 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,014 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:54:14,016 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:54:14,016 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:54:14,016 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:54:14,020 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:54:14,020 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:54:14,024 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416854021,5,FailOnTimeoutGroup] 2024-11-24T02:54:14,024 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416854024,5,FailOnTimeoutGroup] 2024-11-24T02:54:14,024 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,024 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. 
Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:54:14,024 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,024 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:54:14,033 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:54:14,034 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:54:14,034 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396 2024-11-24T02:54:14,047 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:54:14,048 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:54:14,048 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:54:14,051 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created 
cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:54:14,053 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:54:14,053 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,054 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:54:14,055 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:54:14,055 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,056 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:54:14,057 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:54:14,057 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,058 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:54:14,060 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:54:14,060 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,060 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,060 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:54:14,061 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740 2024-11-24T02:54:14,062 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740 2024-11-24T02:54:14,063 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:54:14,063 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:54:14,064 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using 
region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:54:14,065 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:54:14,067 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:54:14,067 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=884659, jitterRate=0.1249026507139206}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:54:14,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416854048Initializing all the Stores at 1732416854049 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416854049Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416854051 (+2 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416854051Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416854051Cleaning up temporary data from old regions at 1732416854063 (+12 ms)Region opened successfully at 1732416854068 (+5 ms) 2024-11-24T02:54:14,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:54:14,068 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:54:14,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:54:14,068 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:54:14,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:54:14,069 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:54:14,069 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416854068Disabling compacts and flushes for region at 
1732416854068Disabling writes for close at 1732416854068Writing region close event to WAL at 1732416854069 (+1 ms)Closed at 1732416854069 2024-11-24T02:54:14,071 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:54:14,071 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:54:14,071 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:54:14,072 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:54:14,073 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:54:14,075 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(746): ClusterId : dd52418b-56ea-4e09-93ab-53a8786b0c2d 2024-11-24T02:54:14,075 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:54:14,080 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:54:14,080 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:54:14,091 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:54:14,091 DEBUG [RS:0;7c69a60bd8f6:41931 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@2b2ce9e1, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:54:14,102 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:41931 2024-11-24T02:54:14,102 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:54:14,102 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:54:14,102 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(832): About to register with Master. 
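The RegionServerCoprocessorHost lines just above report that system and table coprocessor loading are both enabled. To the best of my knowledge these switches correspond to the hbase.coprocessor.enabled and hbase.coprocessor.user.enabled configuration keys, both defaulting to true; the sketch below merely reads and toggles them on a Configuration object and is not part of the test run itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CoprocessorSwitchSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Both switches default to true, which is why the log above reports
        // system and table coprocessor loading as enabled.
        boolean systemEnabled = conf.getBoolean("hbase.coprocessor.enabled", true);
        boolean tableEnabled = conf.getBoolean("hbase.coprocessor.user.enabled", true);
        System.out.println("system coprocessors: " + systemEnabled
            + ", table coprocessors: " + tableEnabled);

        // Disabling table-level (user) coprocessors would normally be done in
        // hbase-site.xml on the server; setting it here is only to show the key.
        conf.setBoolean("hbase.coprocessor.user.enabled", false);
      }
    }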
2024-11-24T02:54:14,103 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,45433,1732416853483 with port=41931, startcode=1732416853648 2024-11-24T02:54:14,103 DEBUG [RS:0;7c69a60bd8f6:41931 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:54:14,105 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:39877, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.5 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:54:14,105 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45433 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,105 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=45433 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,107 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396 2024-11-24T02:54:14,107 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:38631 2024-11-24T02:54:14,107 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:54:14,120 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:54:14,120 DEBUG [RS:0;7c69a60bd8f6:41931 {}] zookeeper.ZKUtil(111): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,120 WARN [RS:0;7c69a60bd8f6:41931 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:54:14,120 INFO [RS:0;7c69a60bd8f6:41931 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:54:14,120 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,120 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,41931,1732416853648] 2024-11-24T02:54:14,124 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:54:14,126 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:54:14,126 INFO [RS:0;7c69a60bd8f6:41931 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:54:14,126 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
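The MemStoreFlusher line above reports globalMemStoreLimit=880 M with a low mark of 836 M. Those numbers are consistent with the default fractions behind hbase.regionserver.global.memstore.size (0.4 of the heap) and hbase.regionserver.global.memstore.size.lower.limit (0.95 of the global limit); the heap size used below (about 2.2 GB) is an inference from the logged value, not something stated in the log.

    public class MemStoreLimitSketch {
      public static void main(String[] args) {
        // Assumed max heap for this test JVM; 880 MB / 0.4 suggests roughly 2.2 GB.
        long maxHeapBytes = 2_200L * 1024 * 1024;

        // Default fractions for the two config keys named in the lead-in.
        double globalFraction = 0.4;      // share of heap for all memstores
        double lowerLimitFraction = 0.95; // low-water mark relative to the global limit

        long globalLimit = (long) (maxHeapBytes * globalFraction);
        long lowMark = (long) (globalLimit * lowerLimitFraction);

        // Prints ~880 MB and ~836 MB, matching the MemStoreFlusher log line.
        System.out.printf("globalMemStoreLimit=%d MB, lowMark=%d MB%n",
            globalLimit >> 20, lowMark >> 20);
      }
    }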
2024-11-24T02:54:14,127 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:54:14,127 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:54:14,128 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:54:14,128 DEBUG [RS:0;7c69a60bd8f6:41931 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:54:14,129 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
2024-11-24T02:54:14,129 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,129 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,129 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,129 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,129 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41931,1732416853648-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:54:14,146 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:54:14,146 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41931,1732416853648-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,146 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,146 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.Replication(171): 7c69a60bd8f6,41931,1732416853648 started 2024-11-24T02:54:14,159 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,159 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,41931,1732416853648, RpcServer on 7c69a60bd8f6/172.17.0.2:41931, sessionid=0x1016ac351830001 2024-11-24T02:54:14,159 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:54:14,159 DEBUG [RS:0;7c69a60bd8f6:41931 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,159 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,41931,1732416853648' 2024-11-24T02:54:14,159 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:54:14,160 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:54:14,160 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:54:14,160 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:54:14,161 DEBUG [RS:0;7c69a60bd8f6:41931 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,161 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,41931,1732416853648' 2024-11-24T02:54:14,161 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:54:14,161 DEBUG 
[RS:0;7c69a60bd8f6:41931 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:54:14,161 DEBUG [RS:0;7c69a60bd8f6:41931 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:54:14,161 INFO [RS:0;7c69a60bd8f6:41931 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:54:14,161 INFO [RS:0;7c69a60bd8f6:41931 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T02:54:14,224 WARN [7c69a60bd8f6:45433 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T02:54:14,263 INFO [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C41931%2C1732416853648, suffix=, logDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648, archiveDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/oldWALs, maxLogs=32 2024-11-24T02:54:14,264 INFO [RS:0;7c69a60bd8f6:41931 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41931%2C1732416853648.1732416854263 2024-11-24T02:54:14,269 INFO [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416854263 2024-11-24T02:54:14,276 DEBUG [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45011:45011),(127.0.0.1/127.0.0.1:46145:46145)] 2024-11-24T02:54:14,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:14,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:14,474 DEBUG [7c69a60bd8f6:45433 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:54:14,474 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,476 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,41931,1732416853648, state=OPENING 2024-11-24T02:54:14,530 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:54:14,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:14,543 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:54:14,543 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:54:14,543 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:54:14,543 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,41931,1732416853648}] 2024-11-24T02:54:14,543 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:54:14,697 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:54:14,698 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:54655, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:54:14,702 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:54:14,702 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:54:14,704 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C41931%2C1732416853648.meta, suffix=.meta, logDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648, archiveDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/oldWALs, maxLogs=32 2024-11-24T02:54:14,704 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41931%2C1732416853648.meta.1732416854704.meta 2024-11-24T02:54:14,709 INFO 
[RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.meta.1732416854704.meta 2024-11-24T02:54:14,712 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45011:45011),(127.0.0.1/127.0.0.1:46145:46145)] 2024-11-24T02:54:14,720 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:54:14,721 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:54:14,721 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:54:14,721 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T02:54:14,721 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:54:14,721 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:54:14,721 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:54:14,722 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:54:14,724 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:54:14,725 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:54:14,725 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): 
instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,726 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:54:14,727 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:54:14,727 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,727 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:54:14,728 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:54:14,728 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,728 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,728 INFO [StoreOpener-1588230740-1 {}] 
regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:54:14,729 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:54:14,729 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,729 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:54:14,729 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:54:14,730 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740 2024-11-24T02:54:14,731 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740 2024-11-24T02:54:14,732 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:54:14,732 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:54:14,733 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T02:54:14,734 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:54:14,735 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=820547, jitterRate=0.043380141258239746}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:54:14,735 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:54:14,736 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416854722Writing region info on filesystem at 1732416854722Initializing all the Stores at 1732416854723 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416854723Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416854724 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416854724Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416854724Cleaning up temporary data from old regions at 1732416854732 (+8 ms)Running coprocessor post-open hooks at 1732416854735 (+3 ms)Region opened successfully at 1732416854736 (+1 ms) 2024-11-24T02:54:14,737 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416854696 2024-11-24T02:54:14,739 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:54:14,739 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:54:14,740 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, 
openSeqNum=2, regionLocation=7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,741 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,41931,1732416853648, state=OPEN 2024-11-24T02:54:14,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:54:14,782 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:54:14,782 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:54:14,782 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:14,782 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:54:14,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:54:14,786 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,41931,1732416853648 in 239 msec 2024-11-24T02:54:14,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:54:14,789 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 715 msec 2024-11-24T02:54:14,789 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:54:14,790 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:54:14,791 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:54:14,791 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,41931,1732416853648, seqNum=-1] 2024-11-24T02:54:14,791 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:54:14,793 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:49489, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:54:14,800 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 790 msec 2024-11-24T02:54:14,800 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416854800, completionTime=-1 2024-11-24T02:54:14,801 INFO 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:54:14,801 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:54:14,802 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:54:14,802 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416914802 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732416974802 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,45433,1732416853483-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,45433,1732416853483-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,45433,1732416853483-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:45433, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,805 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.096sec 2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:54:14,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,45433,1732416853483-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:54:14,808 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,45433,1732416853483-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:54:14,810 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:54:14,810 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:54:14,810 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,45433,1732416853483-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:14,875 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@74cca69f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:54:14,875 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,45433,-1 for getting cluster id 2024-11-24T02:54:14,875 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:54:14,877 DEBUG [HMaster-EventLoopGroup-12-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = 'dd52418b-56ea-4e09-93ab-53a8786b0c2d' 2024-11-24T02:54:14,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:54:14,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "dd52418b-56ea-4e09-93ab-53a8786b0c2d" 2024-11-24T02:54:14,877 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3096aeae, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:54:14,878 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,45433,-1] 2024-11-24T02:54:14,878 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:54:14,878 DEBUG [RPCClient-NioEventLoopGroup-4-16 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:54:14,879 INFO [HMaster-EventLoopGroup-12-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45148, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:54:14,880 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3764e08, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, 
writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:54:14,880 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:54:14,881 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,41931,1732416853648, seqNum=-1] 2024-11-24T02:54:14,881 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:54:14,882 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-13-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:44508, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:54:14,884 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:14,884 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:54:14,887 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:54:14,888 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T02:54:14,889 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.AsyncConnectionImpl(321): The fetched master address is 7c69a60bd8f6,45433,1732416853483 2024-11-24T02:54:14,889 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@6691e400 2024-11-24T02:54:14,889 DEBUG [RPCClient-NioEventLoopGroup-4-1 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T02:54:14,890 INFO [HMaster-EventLoopGroup-12-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:45160, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T02:54:14,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T02:54:14,891 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 
2024-11-24T02:54:14,891 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testCompactionRecordDoesntBlockRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:54:14,892 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:14,894 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T02:54:14,894 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:14,894 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testCompactionRecordDoesntBlockRolling" procId is: 4 2024-11-24T02:54:14,895 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:54:14,895 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T02:54:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741835_1011 (size=405) 2024-11-24T02:54:14,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741835_1011 (size=405) 2024-11-24T02:54:14,911 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 0abb64e93c92c4570886925240a58c9c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testCompactionRecordDoesntBlockRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396 2024-11-24T02:54:14,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741836_1012 (size=88) 2024-11-24T02:54:14,924 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 
127.0.0.1:37711 is added to blk_1073741836_1012 (size=88) 2024-11-24T02:54:14,925 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:54:14,925 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1722): Closing 0abb64e93c92c4570886925240a58c9c, disabling compactions & flushes 2024-11-24T02:54:14,925 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:14,925 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:14,925 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. after waiting 0 ms 2024-11-24T02:54:14,925 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:14,925 INFO [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:14,925 DEBUG [RegionOpenAndInit-TestLogRolling-testCompactionRecordDoesntBlockRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 0abb64e93c92c4570886925240a58c9c: Waiting for close lock at 1732416854925Disabling compacts and flushes for region at 1732416854925Disabling writes for close at 1732416854925Writing region close event to WAL at 1732416854925Closed at 1732416854925 2024-11-24T02:54:14,927 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T02:54:14,927 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.","families":{"info":[{"qualifier":"regioninfo","vlen":87,"tag":[],"timestamp":"1732416854927"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416854927"}]},"ts":"1732416854927"} 2024-11-24T02:54:14,930 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 
2024-11-24T02:54:14,931 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T02:54:14,932 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416854931"}]},"ts":"1732416854931"} 2024-11-24T02:54:14,934 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLING in hbase:meta 2024-11-24T02:54:14,934 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0abb64e93c92c4570886925240a58c9c, ASSIGN}] 2024-11-24T02:54:14,936 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0abb64e93c92c4570886925240a58c9c, ASSIGN 2024-11-24T02:54:14,937 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0abb64e93c92c4570886925240a58c9c, ASSIGN; state=OFFLINE, location=7c69a60bd8f6,41931,1732416853648; forceNewPlan=false, retain=false 2024-11-24T02:54:15,087 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0abb64e93c92c4570886925240a58c9c, regionState=OPENING, regionLocation=7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:15,089 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0abb64e93c92c4570886925240a58c9c, ASSIGN because future has completed 2024-11-24T02:54:15,090 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0abb64e93c92c4570886925240a58c9c, server=7c69a60bd8f6,41931,1732416853648}] 2024-11-24T02:54:15,246 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
2024-11-24T02:54:15,246 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 0abb64e93c92c4570886925240a58c9c, NAME => 'TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:54:15,246 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testCompactionRecordDoesntBlockRolling 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,246 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:54:15,246 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,246 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,248 INFO [StoreOpener-0abb64e93c92c4570886925240a58c9c-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,249 INFO [StoreOpener-0abb64e93c92c4570886925240a58c9c-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 0abb64e93c92c4570886925240a58c9c columnFamilyName info 2024-11-24T02:54:15,249 DEBUG [StoreOpener-0abb64e93c92c4570886925240a58c9c-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:54:15,249 INFO [StoreOpener-0abb64e93c92c4570886925240a58c9c-1 {}] regionserver.HStore(327): Store=0abb64e93c92c4570886925240a58c9c/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:54:15,250 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,250 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under 
hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,251 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,251 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,251 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,252 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,254 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:54:15,255 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 0abb64e93c92c4570886925240a58c9c; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=870692, jitterRate=0.10714328289031982}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:54:15,255 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:54:15,255 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 0abb64e93c92c4570886925240a58c9c: Running coprocessor pre-open hook at 1732416855246Writing region info on filesystem at 1732416855246Initializing all the Stores at 1732416855247 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416855247Cleaning up temporary data from old regions at 1732416855251 (+4 ms)Running coprocessor post-open hooks at 1732416855255 (+4 ms)Region opened successfully at 1732416855255 2024-11-24T02:54:15,256 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c., pid=6, masterSystemTime=1732416855242 2024-11-24T02:54:15,258 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task 
for TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:15,258 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:15,259 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=0abb64e93c92c4570886925240a58c9c, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,41931,1732416853648 2024-11-24T02:54:15,262 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-13-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 0abb64e93c92c4570886925240a58c9c, server=7c69a60bd8f6,41931,1732416853648 because future has completed 2024-11-24T02:54:15,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T02:54:15,266 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 0abb64e93c92c4570886925240a58c9c, server=7c69a60bd8f6,41931,1732416853648 in 173 msec 2024-11-24T02:54:15,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T02:54:15,269 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling, region=0abb64e93c92c4570886925240a58c9c, ASSIGN in 332 msec 2024-11-24T02:54:15,270 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T02:54:15,270 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testCompactionRecordDoesntBlockRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416855270"}]},"ts":"1732416855270"} 2024-11-24T02:54:15,273 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testCompactionRecordDoesntBlockRolling, state=ENABLED in hbase:meta 2024-11-24T02:54:15,275 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T02:54:15,276 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 384 msec 2024-11-24T02:54:15,313 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:15,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:16,314 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:16,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:17,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:17,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:18,315 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:18,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more
2024-11-24T02:54:18,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null
2024-11-24T02:54:18,935 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties
2024-11-24T02:54:18,960 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection.
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:18,963 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:54:19,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:19,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:20,124 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T02:54:20,125 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testCompactionRecordDoesntBlockRolling' 2024-11-24T02:54:20,316 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] 
at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:20,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:21,317 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:21,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:22,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:22,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:23,318 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:23,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:23,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_hbase_table_meta 2024-11-24T02:54:23,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_hbase_table_meta Metrics about Tables on a single HBase RegionServer 2024-11-24T02:54:23,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:54:23,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=Coprocessor.Region.CP_org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint Metrics about HBase RegionObservers 2024-11-24T02:54:23,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_master_table_store 2024-11-24T02:54:23,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_master_table_store Metrics about Tables on a single HBase RegionServer 2024-11-24T02:54:23,406 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:23,406 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T02:54:24,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:24,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:24,912 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:54:24,913 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T02:54:24,914 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testCompactionRecordDoesntBlockRolling,, stopping at row=TestLogRolling-testCompactionRecordDoesntBlockRolling ,, for max=2147483647 with caching=100 2024-11-24T02:54:24,926 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:24,926 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:24,929 DEBUG [RPCClient-NioEventLoopGroup-4-2 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testCompactionRecordDoesntBlockRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c., hostname=7c69a60bd8f6,41931,1732416853648, seqNum=2] 2024-11-24T02:54:24,936 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:24,940 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:24,941 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T02:54:24,942 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T02:54:24,943 INFO [PEWorker-2 {}] procedure.FlushTableProcedure(91): pid=7, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T02:54:24,945 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T02:54:25,104 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41931 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=8 2024-11-24T02:54:25,105 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
2024-11-24T02:54:25,105 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2902): Flushing 0abb64e93c92c4570886925240a58c9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T02:54:25,131 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/5f323c6da4b349adb55a26e005d5f583 is 1080, key is row0001/info:/1732416864930/Put/seqid=0 2024-11-24T02:54:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741837_1013 (size=6033) 2024-11-24T02:54:25,136 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=5 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/5f323c6da4b349adb55a26e005d5f583 2024-11-24T02:54:25,136 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741837_1013 (size=6033) 2024-11-24T02:54:25,144 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/5f323c6da4b349adb55a26e005d5f583 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5f323c6da4b349adb55a26e005d5f583 2024-11-24T02:54:25,151 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5f323c6da4b349adb55a26e005d5f583, entries=1, sequenceid=5, filesize=5.9 K 2024-11-24T02:54:25,152 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0abb64e93c92c4570886925240a58c9c in 47ms, sequenceid=5, compaction requested=false 2024-11-24T02:54:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.HRegion(2603): Flush status journal for 0abb64e93c92c4570886925240a58c9c: 2024-11-24T02:54:25,152 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
2024-11-24T02:54:25,154 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=8}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=8 2024-11-24T02:54:25,156 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster(4169): Remote procedure done, pid=8 2024-11-24T02:54:25,160 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T02:54:25,161 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 214 msec 2024-11-24T02:54:25,163 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=7, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 224 msec 2024-11-24T02:54:25,319 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:25,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:26,320 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:26,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:27,321 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:27,325 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:28,322 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:28,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:29,323 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:29,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:30,324 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:30,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:31,326 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:31,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:32,327 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:32,330 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:33,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:33,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 after 68066ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:54:33,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:33,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(233): attempt=2 on file=hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta after 68055ms java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.recoverLease(DFSClient.java:946) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:317) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$3.doCall(DistributedFileSystem.java:314) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.recoverLease(DistributedFileSystem.java:329) ~[hadoop-hdfs-client-3.4.1.jar:?] at jdk.internal.reflect.GeneratedMethodAccessor203.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverLease(RecoverLeaseFSUtils.java:222) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:155) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 2024-11-24T02:54:34,328 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:34,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:34,953 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=7 2024-11-24T02:54:34,954 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T02:54:34,961 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:34,963 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] procedure2.ProcedureExecutor(1139): Stored pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:34,965 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T02:54:34,966 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T02:54:34,967 INFO [PEWorker-1 {}] procedure.FlushTableProcedure(91): pid=9, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T02:54:34,967 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=9, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T02:54:35,121 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41931 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=10 2024-11-24T02:54:35,121 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
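The repeated Close-WAL-Writer-0 warnings above all have the same shape: RecoverLeaseFSUtils calls recoverLease and isFileClosed on the DistributedFileSystem through reflection, so once the underlying DFSClient has already been shut down the java.io.IOException: Filesystem closed comes back wrapped in an InvocationTargetException, which is why each retry logs "InvocationTargetException: null ... Caused by: java.io.IOException: Filesystem closed". A minimal sketch of that wrapping, using a hypothetical FakeDfs stand-in rather than the real HDFS client:

import java.io.IOException;
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

// Hypothetical stand-in for DistributedFileSystem; only the reflective call
// pattern is illustrated, not the HBase or HDFS implementation.
class FakeDfs {
    private final boolean open = false;            // simulate a client that was already closed

    public boolean isFileClosed(String path) throws IOException {
        if (!open) {
            throw new IOException("Filesystem closed");   // same message seen in the log
        }
        return true;
    }
}

public class ReflectiveLeaseCheck {
    public static void main(String[] args) throws Exception {
        FakeDfs dfs = new FakeDfs();
        Method m = FakeDfs.class.getMethod("isFileClosed", String.class);
        try {
            m.invoke(dfs, "/WALs/example.meta");   // reflective call, as RecoverLeaseFSUtils does
        } catch (InvocationTargetException e) {
            // The real failure sits one level down, so the wrapper prints as
            // "InvocationTargetException: null" and the cause carries the message.
            System.out.println("wrapped: " + e);
            System.out.println("cause:   " + e.getCause());
        }
    }
}

Running this prints the wrapper first and the real cause second, matching the two-part traces that repeat above for both WAL files.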
2024-11-24T02:54:35,121 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2902): Flushing 0abb64e93c92c4570886925240a58c9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T02:54:35,127 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/5ddc8ba826d545e59317ae095bd322e1 is 1080, key is row0002/info:/1732416874957/Put/seqid=0 2024-11-24T02:54:35,133 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741838_1014 (size=6033) 2024-11-24T02:54:35,134 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741838_1014 (size=6033) 2024-11-24T02:54:35,134 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=9 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/5ddc8ba826d545e59317ae095bd322e1 2024-11-24T02:54:35,141 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/5ddc8ba826d545e59317ae095bd322e1 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5ddc8ba826d545e59317ae095bd322e1 2024-11-24T02:54:35,145 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5ddc8ba826d545e59317ae095bd322e1, entries=1, sequenceid=9, filesize=5.9 K 2024-11-24T02:54:35,146 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0abb64e93c92c4570886925240a58c9c in 25ms, sequenceid=9, compaction requested=false 2024-11-24T02:54:35,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.HRegion(2603): Flush status journal for 0abb64e93c92c4570886925240a58c9c: 2024-11-24T02:54:35,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
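The records above are the server-side half of the flush requested for TestLogRolling-testCompactionRecordDoesntBlockRolling: the master stores a FlushTableProcedure (pid=9), dispatches a FlushRegionProcedure (pid=10) to the region server, and the region writes its single info family from the memstore into a temporary HFile under .tmp/ before committing it into info/ at sequenceid=9. The client-side half can be issued roughly as below; this is a sketch against the public HBase client Admin/Table API with the table and row names taken from the log, not the test's own code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        TableName tn = TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Table table = conn.getTable(tn);
             Admin admin = conn.getAdmin()) {

            // Write one small cell, comparable to the row0002/info: entry flushed above.
            Put put = new Put(Bytes.toBytes("row0002"));
            put.addColumn(Bytes.toBytes("info"), Bytes.toBytes(""), Bytes.toBytes("value"));
            table.put(put);

            // Ask the master to flush the table; this drives the FlushTableProcedure /
            // FlushRegionProcedure pair that shows up as pid=9 / pid=10 in the log.
            admin.flush(tn);
        }
    }
}

admin.flush(tn) returns once the master-side procedure completes, which corresponds to the "Operation: FLUSH ... completed" line logged by RawAsyncHBaseAdmin above.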
2024-11-24T02:54:35,146 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-1 {event_type=RS_FLUSH_REGIONS, pid=10}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=10 2024-11-24T02:54:35,147 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster(4169): Remote procedure done, pid=10 2024-11-24T02:54:35,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=9 2024-11-24T02:54:35,151 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 181 msec 2024-11-24T02:54:35,153 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=9, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 190 msec 2024-11-24T02:54:35,329 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:35,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:36,331 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:36,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:37,332 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:37,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:38,333 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:38,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:39,334 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:39,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:40,335 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:40,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:41,336 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:41,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:42,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:42,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:43,337 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:43,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:43,461 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T02:54:44,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:44,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:44,972 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=9 2024-11-24T02:54:44,972 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T02:54:44,975 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41931%2C1732416853648.1732416884974 2024-11-24T02:54:44,979 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:44,979 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:44,980 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:44,980 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:44,980 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:44,980 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416854263 with entries=8, filesize=5.41 KB; new WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416884974 2024-11-24T02:54:44,981 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45011:45011),(127.0.0.1/127.0.0.1:46145:46145)] 2024-11-24T02:54:44,981 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416854263 is not closed yet, will try archiving it next time 2024-11-24T02:54:44,981 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:44,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741833_1009 (size=5546) 2024-11-24T02:54:44,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741833_1009 (size=5546) 2024-11-24T02:54:44,988 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] procedure2.ProcedureExecutor(1139): Stored pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:44,990 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T02:54:44,990 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T02:54:44,991 INFO [PEWorker-5 {}] procedure.FlushTableProcedure(91): pid=11, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 
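The "Rolled WAL ... with entries=8, filesize=5.41 KB" line records a log roll: the sync runners of the old writer are interrupted, a new WAL file (...1732416884974) is opened on the same datanode pipeline, and the previous file is kept until it can be archived. A roll like this can also be requested from a client for a specific region server; the sketch below uses the ServerName that appears in the WAL path above and assumes the Admin.rollWALWriter(ServerName) call available in recent client APIs (check the Admin javadoc for the exact signature on the version in use):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class RollWalExample {
    public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        // Region server name as it appears in the WAL path above: host,port,startcode.
        ServerName rs = ServerName.valueOf("7c69a60bd8f6,41931,1732416853648");

        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
            // Ask the region server to close its current WAL writer and open a new one,
            // which is what produces the "Rolled WAL ... with entries=..." line.
            admin.rollWALWriter(rs);
        }
    }
}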
2024-11-24T02:54:44,991 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T02:54:45,144 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=1,queue=1,port=41931 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=12 2024-11-24T02:54:45,145 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:45,145 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2902): Flushing 0abb64e93c92c4570886925240a58c9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T02:54:45,149 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/9ae2d474ab5047988f765bf8c3975de0 is 1080, key is row0003/info:/1732416884973/Put/seqid=0 2024-11-24T02:54:45,161 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741840_1016 (size=6033) 2024-11-24T02:54:45,162 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741840_1016 (size=6033) 2024-11-24T02:54:45,163 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=13 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/9ae2d474ab5047988f765bf8c3975de0 2024-11-24T02:54:45,170 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/9ae2d474ab5047988f765bf8c3975de0 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/9ae2d474ab5047988f765bf8c3975de0 2024-11-24T02:54:45,177 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/9ae2d474ab5047988f765bf8c3975de0, entries=1, sequenceid=13, filesize=5.9 K 2024-11-24T02:54:45,178 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 
0abb64e93c92c4570886925240a58c9c in 33ms, sequenceid=13, compaction requested=true 2024-11-24T02:54:45,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.HRegion(2603): Flush status journal for 0abb64e93c92c4570886925240a58c9c: 2024-11-24T02:54:45,178 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:45,179 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-2 {event_type=RS_FLUSH_REGIONS, pid=12}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=12 2024-11-24T02:54:45,179 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster(4169): Remote procedure done, pid=12 2024-11-24T02:54:45,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T02:54:45,182 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 189 msec 2024-11-24T02:54:45,184 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=11, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 201 msec 2024-11-24T02:54:45,338 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:45,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:46,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:46,339 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:47,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:47,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:48,340 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:48,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:49,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:49,341 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:50,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:50,342 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:51,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:51,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:52,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:52,343 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:53,344 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:53,345 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:54,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:54,346 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:54,811 INFO [master/7c69a60bd8f6:0.Chore.1 {}] migrate.RollingUpgradeChore(116): There is no table to migrate StoreFileTracker! 2024-11-24T02:54:54,811 INFO [master/7c69a60bd8f6:0.Chore.1 {}] migrate.RollingUpgradeChore(85): All Rolling-Upgrade tasks are complete, shutdown RollingUpgradeChore! 
2024-11-24T02:54:55,082 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=11 2024-11-24T02:54:55,083 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T02:54:55,083 DEBUG [Time-limited test {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:54:55,087 DEBUG [Time-limited test {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 18099 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:54:55,087 DEBUG [Time-limited test {}] regionserver.HStore(1541): 0abb64e93c92c4570886925240a58c9c/info is initiating minor compaction (all files) 2024-11-24T02:54:55,088 INFO [Time-limited test {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:54:55,088 INFO [Time-limited test {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:54:55,088 INFO [Time-limited test {}] regionserver.HRegion(2416): Starting compaction of 0abb64e93c92c4570886925240a58c9c/info in TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:54:55,088 INFO [Time-limited test {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5f323c6da4b349adb55a26e005d5f583, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5ddc8ba826d545e59317ae095bd322e1, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/9ae2d474ab5047988f765bf8c3975de0] into tmpdir=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp, totalSize=17.7 K 2024-11-24T02:54:55,089 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5f323c6da4b349adb55a26e005d5f583, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=5, earliestPutTs=1732416864930 2024-11-24T02:54:55,089 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 5ddc8ba826d545e59317ae095bd322e1, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=9, earliestPutTs=1732416874957 2024-11-24T02:54:55,090 DEBUG [Time-limited test {}] compactions.Compactor(225): Compacting 9ae2d474ab5047988f765bf8c3975de0, keycount=1, bloomtype=ROW, size=5.9 K, encoding=NONE, compression=NONE, seqNum=13, earliestPutTs=1732416884973 2024-11-24T02:54:55,103 INFO [Time-limited test {}] throttle.PressureAwareThroughputController(145): 0abb64e93c92c4570886925240a58c9c#info#compaction#45 average throughput is unlimited, slept 0 time(s) and total slept 
time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:54:55,104 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/845d4dc455c2451ab20880ddeab18ddb is 1080, key is row0001/info:/1732416864930/Put/seqid=0 2024-11-24T02:54:55,108 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741841_1017 (size=8296) 2024-11-24T02:54:55,109 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741841_1017 (size=8296) 2024-11-24T02:54:55,115 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/845d4dc455c2451ab20880ddeab18ddb as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/845d4dc455c2451ab20880ddeab18ddb 2024-11-24T02:54:55,124 INFO [Time-limited test {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 0abb64e93c92c4570886925240a58c9c/info of 0abb64e93c92c4570886925240a58c9c into 845d4dc455c2451ab20880ddeab18ddb(size=8.1 K), total size for store is 8.1 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:54:55,124 DEBUG [Time-limited test {}] regionserver.HRegion(2446): Compaction status journal for 0abb64e93c92c4570886925240a58c9c: 2024-11-24T02:54:55,127 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41931%2C1732416853648.1732416895126 2024-11-24T02:54:55,132 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:55,133 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:55,133 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:55,133 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:55,133 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:54:55,133 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416884974 with entries=4, filesize=2.45 KB; new WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416895126 2024-11-24T02:54:55,134 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45011:45011),(127.0.0.1/127.0.0.1:46145:46145)] 2024-11-24T02:54:55,134 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416884974 is not closed yet, will try archiving it next time 2024-11-24T02:54:55,135 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741839_1015 (size=2520) 2024-11-24T02:54:55,135 INFO [Block 
report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741839_1015 (size=2520) 2024-11-24T02:54:55,136 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416854263 to hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/oldWALs/7c69a60bd8f6%2C41931%2C1732416853648.1732416854263 2024-11-24T02:54:55,137 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster$22(4506): Client=jenkins//172.17.0.2 flush TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:55,138 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] procedure2.ProcedureExecutor(1139): Stored pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:54:55,139 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T02:54:55,139 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_PREPARE, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_PREPARE 2024-11-24T02:54:55,140 INFO [PEWorker-4 {}] procedure.FlushTableProcedure(91): pid=13, state=RUNNABLE:FLUSH_TABLE_FLUSH_REGIONS, hasLock=true; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling execute state=FLUSH_TABLE_FLUSH_REGIONS 2024-11-24T02:54:55,140 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=14, ppid=13, state=RUNNABLE, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure}] 2024-11-24T02:54:55,293 DEBUG [RpcServer.priority.RWQ.Fifo.read.handler=2,queue=1,port=41931 {}] regionserver.RSRpcServices(3929): Executing remote procedure class org.apache.hadoop.hbase.regionserver.FlushRegionCallable, pid=14 2024-11-24T02:54:55,293 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(51): Starting region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
2024-11-24T02:54:55,293 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2902): Flushing 0abb64e93c92c4570886925240a58c9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T02:54:55,299 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/91929b2b558644399447f6caff3e225b is 1080, key is row0000/info:/1732416895125/Put/seqid=0 2024-11-24T02:54:55,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741843_1019 (size=6033) 2024-11-24T02:54:55,307 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741843_1019 (size=6033) 2024-11-24T02:54:55,308 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=18 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/91929b2b558644399447f6caff3e225b 2024-11-24T02:54:55,315 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/91929b2b558644399447f6caff3e225b as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/91929b2b558644399447f6caff3e225b 2024-11-24T02:54:55,320 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/91929b2b558644399447f6caff3e225b, entries=1, sequenceid=18, filesize=5.9 K 2024-11-24T02:54:55,321 INFO [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0abb64e93c92c4570886925240a58c9c in 28ms, sequenceid=18, compaction requested=false 2024-11-24T02:54:55,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.HRegion(2603): Flush status journal for 0abb64e93c92c4570886925240a58c9c: 2024-11-24T02:54:55,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.FlushRegionCallable(64): Closing region operation on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
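For context on the flush sequence recorded above (the master accepts the flush RPC for TestLogRolling-testCompactionRecordDoesntBlockRolling, schedules FlushTableProcedure pid=13 with a FlushRegionProcedure subprocedure pid=14, and the region server writes the memstore out as a new HFile), here is a minimal, hedged sketch of how a client typically requests such a flush through the public HBase Admin API. The table name is copied from the log; the configuration setup and class name are illustrative assumptions, not the test's actual code.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class FlushTableSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create(); // reads hbase-site.xml from the classpath
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // Ask the master to flush every region of the table; the master runs a
          // FlushTableProcedure that fans out per-region FlushRegionProcedure work,
          // which is what the pid=13 / pid=14 entries above record.
          admin.flush(TableName.valueOf("TestLogRolling-testCompactionRecordDoesntBlockRolling"));
        }
      }
    }

The synchronous Admin#flush call is the simplest way to drive this path; the test itself appears to go through the asynchronous admin, which surfaces later in the log as RawAsyncHBaseAdmin reporting "Operation: FLUSH ... completed".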
2024-11-24T02:54:55,321 DEBUG [RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0-0 {event_type=RS_FLUSH_REGIONS, pid=14}] regionserver.RemoteProcedureResultReporter(62): Successfully complete execution of pid=14 2024-11-24T02:54:55,322 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.HMaster(4169): Remote procedure done, pid=14 2024-11-24T02:54:55,325 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=14, resume processing ppid=13 2024-11-24T02:54:55,326 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=14, ppid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushRegionProcedure in 183 msec 2024-11-24T02:54:55,328 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, state=SUCCESS, hasLock=false; org.apache.hadoop.hbase.master.procedure.FlushTableProcedure, id=13, table=TestLogRolling-testCompactionRecordDoesntBlockRolling in 189 msec 2024-11-24T02:54:55,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:55,347 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:56,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:56,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:57,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:57,348 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:58,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:54:58,349 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:59,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:54:59,350 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:00,246 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsRegionWrapperImpl$HRegionMetricsWrapperRunnable(329): Region 0abb64e93c92c4570886925240a58c9c, had cached 0 bytes from a total of 14329 2024-11-24T02:55:00,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:00,352 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:01,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:01,353 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:02,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:02,354 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:03,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:03,355 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:04,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:04,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:05,232 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=45433 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=13 2024-11-24T02:55:05,232 INFO [RPCClient-NioEventLoopGroup-4-3 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: FLUSH, Table Name: default:TestLogRolling-testCompactionRecordDoesntBlockRolling completed 2024-11-24T02:55:05,234 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41931%2C1732416853648.1732416905234 2024-11-24T02:55:05,280 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,280 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,280 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,280 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,281 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,281 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416895126 with entries=3, filesize=1.97 KB; new WAL /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416905234 2024-11-24T02:55:05,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:46145:46145),(127.0.0.1/127.0.0.1:45011:45011)] 2024-11-24T02:55:05,283 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416895126 is not closed yet, will try archiving it next time 2024-11-24T02:55:05,283 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:55:05,283 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/WALs/7c69a60bd8f6,41931,1732416853648/7c69a60bd8f6%2C41931%2C1732416853648.1732416884974 to hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/oldWALs/7c69a60bd8f6%2C41931%2C1732416853648.1732416884974 2024-11-24T02:55:05,284 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
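The repeated "Failed invocation ... Caused by: java.io.IOException: Filesystem closed" warnings above come from a Close-WAL-Writer thread that keeps probing lease recovery on two old WAL files after the DFSClient behind hdfs://localhost:46305 has already been closed, so every probe fails and is retried about once per second. Below is a hedged sketch of that polling pattern written directly against the public DistributedFileSystem API; HBase's RecoverLeaseFSUtils reaches isFileClosed via reflection (hence the InvocationTargetException wrapper in the traces), and the loop shape and the name waitUntilClosed here are illustrative, not the real implementation.

    import java.io.IOException;

    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    final class LeaseRecoverySketch {
      // Poll until the NameNode reports the WAL file closed, i.e. its lease is
      // released and the last block is finalized.
      static void waitUntilClosed(DistributedFileSystem dfs, Path wal) throws InterruptedException {
        while (true) {
          try {
            // recoverLease returns true once recovery completes; isFileClosed is the
            // follow-up check visible in the stack traces above.
            if (dfs.recoverLease(wal) || dfs.isFileClosed(wal)) {
              return;
            }
          } catch (IOException e) {
            // "Filesystem closed" ends up here when the DFSClient was already shut down;
            // the real utility logs a WARN like the ones above and simply tries again.
          }
          Thread.sleep(1000L); // matches the roughly one-second cadence of the timestamps above
        }
      }
    }

With the filesystem handle gone the probes can never succeed, which is why the identical stack trace repeats each second for each of the two WAL paths.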
2024-11-24T02:55:05,284 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:05,284 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:05,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741842_1018 (size=2026) 2024-11-24T02:55:05,285 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741842_1018 (size=2026) 
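The call stack just above shows where the shutdown originates: AbstractTestLogRolling.tearDown invokes HBaseTestingUtil.shutdownMiniCluster, which first closes the shared async connection (the "Connection has been closed" entry) and then tears the cluster down. A minimal sketch of that JUnit hook follows; the TEST_UTIL field name and its construction are assumptions for illustration, and only the shutdownMiniCluster call is taken from the stack trace.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;

    public class TearDownSketch {
      private static final HBaseTestingUtil TEST_UTIL = new HBaseTestingUtil();

      @After
      public void tearDown() throws Exception {
        // Shuts down the mini HBase cluster (and the backing mini DFS/ZooKeeper that
        // HBaseTestingUtil manages), producing the "Shutting down minicluster" and
        // "Cluster shutdown requested of master" lines in this log.
        TEST_UTIL.shutdownMiniCluster();
      }
    }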
2024-11-24T02:55:05,286 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:05,286 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T02:55:05,286 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:55:05,286 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1748818449, stopped=false 2024-11-24T02:55:05,287 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,45433,1732416853483 2024-11-24T02:55:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:05,349 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:05,349 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:55:05,350 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T02:55:05,350 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:05,350 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:05,350 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:05,350 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,41931,1732416853648' ***** 2024-11-24T02:55:05,350 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:55:05,350 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:55:05,350 INFO [RS:0;7c69a60bd8f6:41931 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:55:05,350 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:55:05,350 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:05,350 INFO [RS:0;7c69a60bd8f6:41931 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:55:05,350 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(3091): Received CLOSE for 0abb64e93c92c4570886925240a58c9c 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,41931,1732416853648 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:41931. 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 0abb64e93c92c4570886925240a58c9c, disabling compactions & flushes 2024-11-24T02:55:05,351 DEBUG [RS:0;7c69a60bd8f6:41931 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:05,351 DEBUG [RS:0;7c69a60bd8f6:41931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:05,351 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 
{event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. after waiting 0 ms 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:55:05,351 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(2902): Flushing 0abb64e93c92c4570886925240a58c9c 1/1 column families, dataSize=1.05 KB heapSize=1.38 KB 2024-11-24T02:55:05,351 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1321): Waiting on 2 regions to close 2024-11-24T02:55:05,351 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1325): Online Regions={0abb64e93c92c4570886925240a58c9c=TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c., 1588230740=hbase:meta,,1.1588230740} 2024-11-24T02:55:05,351 DEBUG [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1351): Waiting on 0abb64e93c92c4570886925240a58c9c, 1588230740 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:55:05,351 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:55:05,351 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:55:05,351 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=1.89 KB heapSize=3.91 KB 2024-11-24T02:55:05,356 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 
{event_type=M_RS_CLOSE_REGION}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/8260d8c7b0874139808ffd9f81e482f9 is 1080, key is row0001/info:/1732416905233/Put/seqid=0 2024-11-24T02:55:05,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:05,356 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:05,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741845_1021 (size=6033) 2024-11-24T02:55:05,360 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741845_1021 (size=6033) 2024-11-24T02:55:05,361 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.05 KB at sequenceid=22 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/8260d8c7b0874139808ffd9f81e482f9 2024-11-24T02:55:05,367 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/.tmp/info/8260d8c7b0874139808ffd9f81e482f9 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/8260d8c7b0874139808ffd9f81e482f9 2024-11-24T02:55:05,369 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/info/14926d3010b14069a740dde4000aed61 is 227, key is 
TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c./info:regioninfo/1732416855259/Put/seqid=0 2024-11-24T02:55:05,374 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/8260d8c7b0874139808ffd9f81e482f9, entries=1, sequenceid=22, filesize=5.9 K 2024-11-24T02:55:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741846_1022 (size=7308) 2024-11-24T02:55:05,375 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741846_1022 (size=7308) 2024-11-24T02:55:05,375 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=1.65 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/info/14926d3010b14069a740dde4000aed61 2024-11-24T02:55:05,375 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(3140): Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0abb64e93c92c4570886925240a58c9c in 24ms, sequenceid=22, compaction requested=true 2024-11-24T02:55:05,376 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5f323c6da4b349adb55a26e005d5f583, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5ddc8ba826d545e59317ae095bd322e1, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/9ae2d474ab5047988f765bf8c3975de0] to archive 2024-11-24T02:55:05,377 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
2024-11-24T02:55:05,378 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5f323c6da4b349adb55a26e005d5f583 to hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5f323c6da4b349adb55a26e005d5f583 2024-11-24T02:55:05,379 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5ddc8ba826d545e59317ae095bd322e1 to hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/5ddc8ba826d545e59317ae095bd322e1 2024-11-24T02:55:05,380 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/9ae2d474ab5047988f765bf8c3975de0 to hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/archive/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/info/9ae2d474ab5047988f765bf8c3975de0 2024-11-24T02:55:05,381 DEBUG [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7c69a60bd8f6:45433 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] 
at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 
16 more 2024-11-24T02:55:05,381 WARN [StoreCloser-TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [5f323c6da4b349adb55a26e005d5f583=6033, 5ddc8ba826d545e59317ae095bd322e1=6033, 9ae2d474ab5047988f765bf8c3975de0=6033] 2024-11-24T02:55:05,384 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/default/TestLogRolling-testCompactionRecordDoesntBlockRolling/0abb64e93c92c4570886925240a58c9c/recovered.edits/25.seqid, newMaxSeqId=25, maxSeqId=1 2024-11-24T02:55:05,385 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 2024-11-24T02:55:05,385 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 0abb64e93c92c4570886925240a58c9c: Waiting for close lock at 1732416905351Running coprocessor pre-close hooks at 1732416905351Disabling compacts and flushes for region at 1732416905351Disabling writes for close at 1732416905351Obtaining lock to block concurrent updates at 1732416905351Preparing flush snapshotting stores in 0abb64e93c92c4570886925240a58c9c at 1732416905351Finished memstore snapshotting TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c., syncing WAL and waiting on mvcc, flushsize=dataSize=1076, getHeapSize=1392, getOffHeapSize=0, getCellsCount=1 at 1732416905351Flushing stores of TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. at 1732416905352 (+1 ms)Flushing 0abb64e93c92c4570886925240a58c9c/info: creating writer at 1732416905352Flushing 0abb64e93c92c4570886925240a58c9c/info: appending metadata at 1732416905355 (+3 ms)Flushing 0abb64e93c92c4570886925240a58c9c/info: closing flushed file at 1732416905355Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@725309e0: reopening flushed file at 1732416905366 (+11 ms)Finished flush of dataSize ~1.05 KB/1076, heapSize ~1.36 KB/1392, currentSize=0 B/0 for 0abb64e93c92c4570886925240a58c9c in 24ms, sequenceid=22, compaction requested=true at 1732416905375 (+9 ms)Writing region close event to WAL at 1732416905381 (+6 ms)Running coprocessor post-close hooks at 1732416905385 (+4 ms)Closed at 1732416905385 2024-11-24T02:55:05,385 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testCompactionRecordDoesntBlockRolling,,1732416854891.0abb64e93c92c4570886925240a58c9c. 
2024-11-24T02:55:05,400 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/ns/1a4c20e3a7be4549bc80465a3b5efa22 is 43, key is default/ns:d/1732416854794/Put/seqid=0 2024-11-24T02:55:05,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741847_1023 (size=5153) 2024-11-24T02:55:05,404 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741847_1023 (size=5153) 2024-11-24T02:55:05,405 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/ns/1a4c20e3a7be4549bc80465a3b5efa22 2024-11-24T02:55:05,427 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/table/6abd5d986f1c414d92dd0384ed4d407a is 89, key is TestLogRolling-testCompactionRecordDoesntBlockRolling/table:state/1732416855270/Put/seqid=0 2024-11-24T02:55:05,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741848_1024 (size=5508) 2024-11-24T02:55:05,431 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741848_1024 (size=5508) 2024-11-24T02:55:05,431 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=170 B at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/table/6abd5d986f1c414d92dd0384ed4d407a 2024-11-24T02:55:05,437 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/info/14926d3010b14069a740dde4000aed61 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/info/14926d3010b14069a740dde4000aed61 2024-11-24T02:55:05,444 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/info/14926d3010b14069a740dde4000aed61, entries=10, sequenceid=11, filesize=7.1 K 2024-11-24T02:55:05,445 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/ns/1a4c20e3a7be4549bc80465a3b5efa22 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/ns/1a4c20e3a7be4549bc80465a3b5efa22 2024-11-24T02:55:05,452 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 
{event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/ns/1a4c20e3a7be4549bc80465a3b5efa22, entries=2, sequenceid=11, filesize=5.0 K 2024-11-24T02:55:05,453 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/.tmp/table/6abd5d986f1c414d92dd0384ed4d407a as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/table/6abd5d986f1c414d92dd0384ed4d407a 2024-11-24T02:55:05,459 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/table/6abd5d986f1c414d92dd0384ed4d407a, entries=2, sequenceid=11, filesize=5.4 K 2024-11-24T02:55:05,460 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 109ms, sequenceid=11, compaction requested=false 2024-11-24T02:55:05,466 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/data/hbase/meta/1588230740/recovered.edits/14.seqid, newMaxSeqId=14, maxSeqId=1 2024-11-24T02:55:05,467 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:55:05,467 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:05,467 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416905351Running coprocessor pre-close hooks at 1732416905351Disabling compacts and flushes for region at 1732416905351Disabling writes for close at 1732416905351Obtaining lock to block concurrent updates at 1732416905351Preparing flush snapshotting stores in 1588230740 at 1732416905351Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=1932, getHeapSize=3936, getOffHeapSize=0, getCellsCount=14 at 1732416905352 (+1 ms)Flushing stores of hbase:meta,,1.1588230740 at 1732416905352Flushing 1588230740/info: creating writer at 1732416905352Flushing 1588230740/info: appending metadata at 1732416905369 (+17 ms)Flushing 1588230740/info: closing flushed file at 1732416905369Flushing 1588230740/ns: creating writer at 1732416905381 (+12 ms)Flushing 1588230740/ns: appending metadata at 1732416905399 (+18 ms)Flushing 1588230740/ns: closing flushed file at 1732416905399Flushing 1588230740/table: creating writer at 1732416905410 (+11 ms)Flushing 1588230740/table: appending metadata at 1732416905426 (+16 ms)Flushing 1588230740/table: closing flushed file at 1732416905426Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@394f5067: reopening flushed file at 1732416905437 (+11 ms)Flushing 
org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@5cd57059: reopening flushed file at 1732416905444 (+7 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@723181d4: reopening flushed file at 1732416905452 (+8 ms)Finished flush of dataSize ~1.89 KB/1932, heapSize ~3.61 KB/3696, currentSize=0 B/0 for 1588230740 in 109ms, sequenceid=11, compaction requested=false at 1732416905460 (+8 ms)Writing region close event to WAL at 1732416905463 (+3 ms)Running coprocessor post-close hooks at 1732416905467 (+4 ms)Closed at 1732416905467 2024-11-24T02:55:05,467 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:05,551 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,41931,1732416853648; all regions closed. 2024-11-24T02:55:05,552 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,552 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,552 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,552 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,552 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741834_1010 (size=3306) 2024-11-24T02:55:05,554 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741834_1010 (size=3306) 2024-11-24T02:55:05,557 DEBUG [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/oldWALs 2024-11-24T02:55:05,557 INFO [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C41931%2C1732416853648.meta:.meta(num 1732416854704) 2024-11-24T02:55:05,557 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,557 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,558 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,558 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,558 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741844_1020 (size=1252) 2024-11-24T02:55:05,560 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741844_1020 (size=1252) 2024-11-24T02:55:05,563 DEBUG [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(1256): Moved 2 WAL file(s) to /user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/oldWALs 2024-11-24T02:55:05,563 INFO [RS:0;7c69a60bd8f6:41931 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C41931%2C1732416853648:(num 1732416905234) 2024-11-24T02:55:05,563 DEBUG [RS:0;7c69a60bd8f6:41931 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:05,563 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:55:05,563 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:55:05,563 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had 
[ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T02:55:05,563 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:55:05,563 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:55:05,563 INFO [RS:0;7c69a60bd8f6:41931 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41931 2024-11-24T02:55:05,570 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,41931,1732416853648 2024-11-24T02:55:05,570 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:55:05,570 ERROR [Time-limited test-EventThread {}] zookeeper.ClientCnxn$EventThread(581): Error while calling watcher. java.util.concurrent.RejectedExecutionException: Task org.apache.hadoop.hbase.trace.TraceUtil$$Lambda$363/0x00007fe598903a08@56044b2c rejected from java.util.concurrent.ThreadPoolExecutor@523bd534[Shutting down, pool size = 1, active threads = 0, queued tasks = 0, completed tasks = 14] at java.util.concurrent.ThreadPoolExecutor$AbortPolicy.rejectedExecution(ThreadPoolExecutor.java:2065) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.reject(ThreadPoolExecutor.java:833) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.execute(ThreadPoolExecutor.java:1365) ~[?:?] at java.util.concurrent.Executors$DelegatedExecutorService.execute(Executors.java:721) ~[?:?] 
at org.apache.hadoop.hbase.zookeeper.ZKWatcher.process(ZKWatcher.java:613) ~[hbase-zookeeper-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.zookeeper.ClientCnxn$EventThread.processEvent(ClientCnxn.java:579) ~[zookeeper-3.8.4.jar:3.8.4] at org.apache.zookeeper.ClientCnxn$EventThread.run(ClientCnxn.java:554) ~[zookeeper-3.8.4.jar:3.8.4] 2024-11-24T02:55:05,571 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:55:05,581 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,41931,1732416853648] 2024-11-24T02:55:05,591 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,41931,1732416853648 already deleted, retry=false 2024-11-24T02:55:05,591 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,41931,1732416853648 expired; onlineServers=0 2024-11-24T02:55:05,591 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,45433,1732416853483' ***** 2024-11-24T02:55:05,591 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:55:05,591 INFO [M:0;7c69a60bd8f6:45433 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:55:05,591 INFO [M:0;7c69a60bd8f6:45433 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:55:05,591 DEBUG [M:0;7c69a60bd8f6:45433 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:55:05,592 DEBUG [M:0;7c69a60bd8f6:45433 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:55:05,592 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 2024-11-24T02:55:05,592 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416854021 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416854021,5,FailOnTimeoutGroup] 2024-11-24T02:55:05,592 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416854024 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416854024,5,FailOnTimeoutGroup] 2024-11-24T02:55:05,592 INFO [M:0;7c69a60bd8f6:45433 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:55:05,592 INFO [M:0;7c69a60bd8f6:45433 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:55:05,592 DEBUG [M:0;7c69a60bd8f6:45433 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:55:05,592 INFO [M:0;7c69a60bd8f6:45433 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:55:05,592 INFO [M:0;7c69a60bd8f6:45433 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:55:05,592 INFO [M:0;7c69a60bd8f6:45433 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:55:05,592 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 
2024-11-24T02:55:05,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:55:05,602 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:05,602 DEBUG [M:0;7c69a60bd8f6:45433 {}] zookeeper.ZKUtil(347): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:55:05,602 WARN [M:0;7c69a60bd8f6:45433 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:55:05,603 INFO [M:0;7c69a60bd8f6:45433 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/.lastflushedseqids 2024-11-24T02:55:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741849_1025 (size=130) 2024-11-24T02:55:05,608 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741849_1025 (size=130) 2024-11-24T02:55:05,609 INFO [M:0;7c69a60bd8f6:45433 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:55:05,609 INFO [M:0;7c69a60bd8f6:45433 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:55:05,609 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:55:05,609 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:05,609 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:05,609 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:55:05,609 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T02:55:05,609 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=43.59 KB heapSize=55 KB 2024-11-24T02:55:05,627 DEBUG [M:0;7c69a60bd8f6:45433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc64a3c2a5fa4320a5125b2709c79f11 is 82, key is hbase:meta,,1/info:regioninfo/1732416854740/Put/seqid=0 2024-11-24T02:55:05,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741850_1026 (size=5672) 2024-11-24T02:55:05,632 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741850_1026 (size=5672) 2024-11-24T02:55:05,632 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc64a3c2a5fa4320a5125b2709c79f11 2024-11-24T02:55:05,655 DEBUG [M:0;7c69a60bd8f6:45433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/290b7b8762f5411995848122f4df54d1 is 798, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732416855276/Put/seqid=0 2024-11-24T02:55:05,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741851_1027 (size=7823) 2024-11-24T02:55:05,660 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741851_1027 (size=7823) 2024-11-24T02:55:05,661 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=42.99 KB at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/290b7b8762f5411995848122f4df54d1 2024-11-24T02:55:05,666 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 290b7b8762f5411995848122f4df54d1 2024-11-24T02:55:05,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:05,681 INFO [RS:0;7c69a60bd8f6:41931 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:55:05,681 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:41931-0x1016ac351830001, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:05,681 INFO [RS:0;7c69a60bd8f6:41931 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,41931,1732416853648; zookeeper connection closed. 
2024-11-24T02:55:05,681 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@69f2a651 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@69f2a651 2024-11-24T02:55:05,681 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T02:55:05,682 DEBUG [M:0;7c69a60bd8f6:45433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/abd0920674874afdb0fd2982212f4d93 is 69, key is 7c69a60bd8f6,41931,1732416853648/rs:state/1732416854105/Put/seqid=0 2024-11-24T02:55:05,686 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741852_1028 (size=5156) 2024-11-24T02:55:05,687 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741852_1028 (size=5156) 2024-11-24T02:55:05,687 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/abd0920674874afdb0fd2982212f4d93 2024-11-24T02:55:05,707 DEBUG [M:0;7c69a60bd8f6:45433 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30d811386cf34feda5ac32785468c105 is 52, key is load_balancer_on/state:d/1732416854886/Put/seqid=0 2024-11-24T02:55:05,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741853_1029 (size=5056) 2024-11-24T02:55:05,711 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741853_1029 (size=5056) 2024-11-24T02:55:05,712 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=121 (bloomFilter=true), to=hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30d811386cf34feda5ac32785468c105 2024-11-24T02:55:05,718 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/cc64a3c2a5fa4320a5125b2709c79f11 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc64a3c2a5fa4320a5125b2709c79f11 2024-11-24T02:55:05,723 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/cc64a3c2a5fa4320a5125b2709c79f11, entries=8, sequenceid=121, filesize=5.5 K 2024-11-24T02:55:05,724 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/290b7b8762f5411995848122f4df54d1 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/290b7b8762f5411995848122f4df54d1 2024-11-24T02:55:05,728 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for 290b7b8762f5411995848122f4df54d1 2024-11-24T02:55:05,728 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/290b7b8762f5411995848122f4df54d1, entries=14, sequenceid=121, filesize=7.6 K 2024-11-24T02:55:05,729 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/abd0920674874afdb0fd2982212f4d93 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/abd0920674874afdb0fd2982212f4d93 2024-11-24T02:55:05,734 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/abd0920674874afdb0fd2982212f4d93, entries=1, sequenceid=121, filesize=5.0 K 2024-11-24T02:55:05,735 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/30d811386cf34feda5ac32785468c105 as hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/30d811386cf34feda5ac32785468c105 2024-11-24T02:55:05,739 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:38631/user/jenkins/test-data/d8a48db9-804f-75dd-7ebf-67d05b1e8396/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/30d811386cf34feda5ac32785468c105, entries=1, sequenceid=121, filesize=4.9 K 2024-11-24T02:55:05,741 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(3140): Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=121, compaction requested=false 2024-11-24T02:55:05,742 INFO [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T02:55:05,742 DEBUG [M:0;7c69a60bd8f6:45433 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416905609Disabling compacts and flushes for region at 1732416905609Disabling writes for close at 1732416905609Obtaining lock to block concurrent updates at 1732416905609Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416905609Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=44638, getHeapSize=56256, getOffHeapSize=0, getCellsCount=140 at 1732416905610 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732416905610Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416905610Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416905627 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416905627Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416905637 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416905654 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416905654Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416905666 (+12 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416905682 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416905682Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416905691 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416905706 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416905706Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6ad5f778: reopening flushed file at 1732416905717 (+11 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@25771c79: reopening flushed file at 1732416905723 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@237efca0: reopening flushed file at 1732416905729 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1b27c928: reopening flushed file at 1732416905734 (+5 ms)Finished flush of dataSize ~43.59 KB/44638, heapSize ~54.94 KB/56256, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 131ms, sequenceid=121, compaction requested=false at 1732416905741 (+7 ms)Writing region close event to WAL at 1732416905742 (+1 ms)Closed at 1732416905742 2024-11-24T02:55:05,742 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,742 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,742 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,743 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,743 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:37711 is added to blk_1073741830_1006 (size=53035) 2024-11-24T02:55:05,745 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40219 is added to blk_1073741830_1006 (size=53035) 2024-11-24T02:55:05,745 INFO [M:0;7c69a60bd8f6:45433 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
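The "Region close journal" entry above is a list of named phases, each with an absolute timestamp and a "(+N ms)" delta from the previous phase. A small illustrative sketch of that journal style (not HBase's actual MonitoredTask/close-journal code) could be:

```java
import java.util.ArrayList;
import java.util.List;

/** Records named phases with timestamps and prints the delta to the previous phase. */
public final class PhaseJournal {
  private record Entry(String status, long at) {}
  private final List<Entry> entries = new ArrayList<>();

  public void mark(String status) {
    entries.add(new Entry(status, System.currentTimeMillis()));
  }

  @Override
  public String toString() {
    StringBuilder sb = new StringBuilder();
    long prev = -1;
    for (Entry e : entries) {
      sb.append(e.status()).append(" at ").append(e.at());
      if (prev >= 0 && e.at() > prev) {        // only show a delta when time advanced
        sb.append(" (+").append(e.at() - prev).append(" ms)");
      }
      prev = e.at();
      sb.append('\n');
    }
    return sb.toString();
  }
}
```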
2024-11-24T02:55:05,745 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:55:05,745 INFO [M:0;7c69a60bd8f6:45433 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:45433 2024-11-24T02:55:05,746 INFO [M:0;7c69a60bd8f6:45433 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:55:05,986 INFO [M:0;7c69a60bd8f6:45433 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:55:05,986 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:05,987 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:45433-0x1016ac351830000, quorum=127.0.0.1:57226, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:05,992 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@2526c219{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:05,992 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@74e6f5d9{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:05,993 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:05,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4bd1d692{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:05,993 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@10c2896a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:05,996 WARN [BP-1016711101-172.17.0.2-1732416851243 heartbeating to localhost/127.0.0.1:38631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:55:05,996 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
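The ZKWatcher lines above report session events with type=None, state=Closed and no path, which is how ZooKeeper delivers connection-state changes rather than znode changes. A minimal sketch of a watcher that logs such events, using the plain ZooKeeper client API (illustrative only, not HBase's ZKWatcher implementation):

```java
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.Watcher;

public class LoggingWatcher implements Watcher {
  @Override
  public void process(WatchedEvent event) {
    // Session-state changes (e.g. Closed, Disconnected) arrive with type=None and a null path.
    System.out.println("Received ZooKeeper Event, type=" + event.getType()
        + ", state=" + event.getState() + ", path=" + event.getPath());
  }
}
```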
2024-11-24T02:55:05,996 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:55:05,996 WARN [BP-1016711101-172.17.0.2-1732416851243 heartbeating to localhost/127.0.0.1:38631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1016711101-172.17.0.2-1732416851243 (Datanode Uuid 48e32b1b-ba20-40de-b21a-fc952036af1b) service to localhost/127.0.0.1:38631 2024-11-24T02:55:05,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data3/current/BP-1016711101-172.17.0.2-1732416851243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:05,997 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data4/current/BP-1016711101-172.17.0.2-1732416851243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:05,997 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:55:05,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@43d16ee8{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:05,999 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5b135886{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:05,999 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:05,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@439cbd97{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:05,999 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2cfed7b4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:06,001 WARN [BP-1016711101-172.17.0.2-1732416851243 heartbeating to localhost/127.0.0.1:38631 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:55:06,001 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
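The refreshUsed warnings above come from a background thread that periodically recomputes disk usage and treats an interrupt during its sleep as the shutdown signal. A self-contained sketch of that pattern (not Hadoop's CachingGetSpaceUsed code; the class name and computation are illustrative):

```java
/** Daemon-style loop: refresh a cached value, sleep, and exit cleanly on interrupt. */
public class DiskUsageRefresher implements Runnable {
  private final long intervalMs;
  private volatile long usedBytes;

  public DiskUsageRefresher(long intervalMs) {
    this.intervalMs = intervalMs;
  }

  public long getUsedBytes() {
    return usedBytes;
  }

  @Override
  public void run() {
    while (!Thread.currentThread().isInterrupted()) {
      usedBytes = computeUsedBytes();
      try {
        Thread.sleep(intervalMs);
      } catch (InterruptedException ie) {
        // Shutdown interrupts the sleep; log and exit, as in the WARN lines above.
        System.err.println("Thread Interrupted waiting to refresh disk information: " + ie.getMessage());
        Thread.currentThread().interrupt();
        return;
      }
    }
  }

  private long computeUsedBytes() {
    java.io.File root = new java.io.File(".");
    return root.getTotalSpace() - root.getUsableSpace();
  }
}
```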
2024-11-24T02:55:06,001 WARN [BP-1016711101-172.17.0.2-1732416851243 heartbeating to localhost/127.0.0.1:38631 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1016711101-172.17.0.2-1732416851243 (Datanode Uuid 8d1aa9a2-99b0-4763-af41-8864caeeda8e) service to localhost/127.0.0.1:38631 2024-11-24T02:55:06,001 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:55:06,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data1/current/BP-1016711101-172.17.0.2-1732416851243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:06,001 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/cluster_f31c6d58-cfc4-4e2c-1331-99a8fb2b63cd/data/data2/current/BP-1016711101-172.17.0.2-1732416851243 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:06,001 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:55:06,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@6d483d07{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:55:06,007 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@1106c0e7{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:06,007 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:06,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@4ac76b28{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:06,007 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@293e66d4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:06,013 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:55:06,029 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:55:06,036 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testCompactionRecordDoesntBlockRolling Thread=206 (was 179) Potentially hanging thread: LeaseRenewer:jenkins.hfs.5@localhost:38631 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) 
app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38631 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38631 from jenkins.hfs.5 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-34-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-37-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: 
LeaseRenewer:jenkins@localhost:38631 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-34-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-13-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: RPCClient-NioEventLoopGroup-4-16 java.base@17.0.11/sun.nio.ch.EPoll.wait(Native Method) java.base@17.0.11/sun.nio.ch.EPollSelectorImpl.doSelect(EPollSelectorImpl.java:118) java.base@17.0.11/sun.nio.ch.SelectorImpl.lockAndDoSelect(SelectorImpl.java:129) java.base@17.0.11/sun.nio.ch.SelectorImpl.select(SelectorImpl.java:146) app//org.apache.hbase.thirdparty.io.netty.channel.nio.SelectedSelectionKeySetSelector.select(SelectedSelectionKeySetSelector.java:68) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.select(NioEventLoop.java:879) app//org.apache.hbase.thirdparty.io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:526) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-12-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) 
app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-35-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:38631 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-35-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: regionserver/7c69a60bd8f6:0.leaseChecker java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hbase.regionserver.LeaseManager.run(LeaseManager.java:82) Potentially hanging thread: nioEventLoopGroup-37-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-36-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:38631 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) 
java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=483 (was 457) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=151 (was 190), ProcessCount=11 (was 11), AvailableMemoryMB=9993 (was 9944) - AvailableMemoryMB LEAK? - 2024-11-24T02:55:06,042 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRolling Thread=206, OpenFileDescriptor=483, MaxFileDescriptor=1048576, SystemLoadAverage=151, ProcessCount=11, AvailableMemoryMB=9993 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.log.dir so I do NOT create it in target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/9cdf611c-0158-9ada-2db0-18fcee12ddca/hadoop.tmp.dir so I do NOT create it in target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29, deleteOnExit=true 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/test.cache.data in system properties and HBase conf 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir in system properties and HBase conf 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/mapreduce.cluster.local.dir in system properties and HBase conf 
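The ResourceChecker summary above compares resource counts before and after the test (Thread=206 was 179, OpenFileDescriptor=483 was 457) and flags a possible leak, listing each potentially hanging thread with its stack. A simplified sketch of that before/after thread check (illustrative only, not HBase's ResourceChecker):

```java
import java.util.HashSet;
import java.util.Set;

public final class ThreadLeakCheck {
  /** Snapshot the names of all currently live threads. */
  public static Set<String> snapshot() {
    Set<String> names = new HashSet<>();
    for (Thread t : Thread.getAllStackTraces().keySet()) {
      names.add(t.getName());
    }
    return names;
  }

  /** Report threads that appeared during the test and are still alive afterwards. */
  public static void report(Set<String> before, Set<String> after) {
    Set<String> leaked = new HashSet<>(after);
    leaked.removeAll(before);
    System.out.printf("Thread=%d (was %d)%s%n", after.size(), before.size(),
        leaked.isEmpty() ? "" : " - Thread LEAK? -");
    for (String name : leaked) {
      System.out.println("Potentially hanging thread: " + name);
    }
  }
}
```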
2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:55:06,043 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:55:06,043 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. Skipping on block location reordering 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/dfs.datanode.shared.file.descriptor.paths in 
system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:55:06,044 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:55:06,057 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:55:06,135 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:55:06,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:06,357 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:06,426 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:06,430 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:55:06,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:55:06,431 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:55:06,431 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:55:06,432 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:06,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ecf816b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:55:06,432 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ce0132a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:55:06,523 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@7b29c022{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/java.io.tmpdir/jetty-localhost-43973-hadoop-hdfs-3_4_1-tests_jar-_-any-7003102016712666728/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:55:06,524 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@4e36d39c{HTTP/1.1, (http/1.1)}{localhost:43973} 2024-11-24T02:55:06,524 INFO [Time-limited test {}] server.Server(415): Started @250265ms 2024-11-24T02:55:06,534 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:55:06,763 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:06,766 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:55:06,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:55:06,767 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:55:06,767 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:55:06,767 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@354bdaa4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:55:06,768 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@18d1ee92{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:55:06,878 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@23e1642c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/java.io.tmpdir/jetty-localhost-34491-hadoop-hdfs-3_4_1-tests_jar-_-any-10913216534309832123/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:06,878 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@767f877d{HTTP/1.1, (http/1.1)}{localhost:34491} 2024-11-24T02:55:06,879 INFO [Time-limited test {}] server.Server(415): Started @250620ms 2024-11-24T02:55:06,879 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:55:06,902 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:06,904 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:55:06,905 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:55:06,905 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:55:06,905 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:55:06,905 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@49490ce4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:55:06,906 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@19fe8881{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:55:07,002 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@47bcda8c{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/java.io.tmpdir/jetty-localhost-45131-hadoop-hdfs-3_4_1-tests_jar-_-any-17199323446678511656/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:07,003 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@29765213{HTTP/1.1, (http/1.1)}{localhost:45131} 2024-11-24T02:55:07,003 INFO [Time-limited test {}] server.Server(415): Started @250744ms 2024-11-24T02:55:07,004 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:55:07,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:07,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:08,025 WARN [Thread-1961 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data1/current/BP-839236611-172.17.0.2-1732416906061/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:08,026 WARN [Thread-1962 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data2/current/BP-839236611-172.17.0.2-1732416906061/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:08,046 WARN [Thread-1925 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:55:08,048 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20016bc483aa897d with lease ID 0x2c86176a0fd10838: Processing first storage report for DS-1005f09c-06a5-4460-a9fe-217c64b13d7c from datanode DatanodeRegistration(127.0.0.1:35805, datanodeUuid=cd06f77c-4ff5-4b2d-b0b3-a4d5ee5fbe1a, infoPort=39783, infoSecurePort=0, ipcPort=45483, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061) 2024-11-24T02:55:08,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20016bc483aa897d with lease ID 0x2c86176a0fd10838: from storage DS-1005f09c-06a5-4460-a9fe-217c64b13d7c node DatanodeRegistration(127.0.0.1:35805, datanodeUuid=cd06f77c-4ff5-4b2d-b0b3-a4d5ee5fbe1a, infoPort=39783, infoSecurePort=0, ipcPort=45483, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:08,048 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x20016bc483aa897d with lease ID 0x2c86176a0fd10838: Processing first storage report for DS-d5960869-fa5c-4827-b9af-eec2c634d168 from datanode DatanodeRegistration(127.0.0.1:35805, datanodeUuid=cd06f77c-4ff5-4b2d-b0b3-a4d5ee5fbe1a, infoPort=39783, infoSecurePort=0, ipcPort=45483, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061) 2024-11-24T02:55:08,048 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x20016bc483aa897d with lease ID 0x2c86176a0fd10838: from storage DS-d5960869-fa5c-4827-b9af-eec2c634d168 node DatanodeRegistration(127.0.0.1:35805, datanodeUuid=cd06f77c-4ff5-4b2d-b0b3-a4d5ee5fbe1a, infoPort=39783, infoSecurePort=0, ipcPort=45483, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:08,150 WARN [Thread-1972 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data3/current/BP-839236611-172.17.0.2-1732416906061/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:08,150 WARN [Thread-1973 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data4/current/BP-839236611-172.17.0.2-1732416906061/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:08,165 WARN [Thread-1948 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:55:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe43ccb7f356082a7 with lease ID 0x2c86176a0fd10839: Processing first storage report for DS-c05b1dc7-7d24-411e-b45e-75757d35fee9 from datanode DatanodeRegistration(127.0.0.1:35353, datanodeUuid=85dbf609-e02d-45aa-b358-602e87b9aad6, infoPort=36005, infoSecurePort=0, ipcPort=35729, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061) 2024-11-24T02:55:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe43ccb7f356082a7 with lease ID 0x2c86176a0fd10839: from storage DS-c05b1dc7-7d24-411e-b45e-75757d35fee9 node DatanodeRegistration(127.0.0.1:35353, datanodeUuid=85dbf609-e02d-45aa-b358-602e87b9aad6, infoPort=36005, infoSecurePort=0, ipcPort=35729, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0xe43ccb7f356082a7 with lease ID 0x2c86176a0fd10839: Processing first storage report for DS-fb40cea0-83f5-4f0a-951b-fdb13e6bfb7e from datanode DatanodeRegistration(127.0.0.1:35353, datanodeUuid=85dbf609-e02d-45aa-b358-602e87b9aad6, infoPort=36005, infoSecurePort=0, ipcPort=35729, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061) 2024-11-24T02:55:08,167 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0xe43ccb7f356082a7 with lease ID 0x2c86176a0fd10839: from storage DS-fb40cea0-83f5-4f0a-951b-fdb13e6bfb7e node DatanodeRegistration(127.0.0.1:35353, datanodeUuid=85dbf609-e02d-45aa-b358-602e87b9aad6, infoPort=36005, infoSecurePort=0, ipcPort=35729, storageInfo=lv=-57;cid=testClusterID;nsid=2017015210;c=1732416906061), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:08,253 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77 2024-11-24T02:55:08,258 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/zookeeper_0, clientPort=54630, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:55:08,259 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=54630 2024-11-24T02:55:08,259 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:08,261 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:08,271 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:55:08,272 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:55:08,273 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e with version=8 2024-11-24T02:55:08,273 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:55:08,275 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:55:08,276 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:55:08,277 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:41039 2024-11-24T02:55:08,278 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:41039 connecting to ZooKeeper ensemble=127.0.0.1:54630 2024-11-24T02:55:08,350 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:410390x0, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null
2024-11-24T02:55:08,351 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:41039-0x1016ac4278c0000 connected
2024-11-24T02:55:08,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T02:55:08,358 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta
java.lang.reflect.InvocationTargetException: null
at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?]
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
... 11 more
2024-11-24T02:55:08,433 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:55:08,435 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks
2024-11-24T02:55:08,436 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running
2024-11-24T02:55:08,437 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e, hbase.cluster.distributed=false
2024-11-24T02:55:08,439 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl
2024-11-24T02:55:08,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=41039
2024-11-24T02:55:08,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=41039
2024-11-24T02:55:08,439 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=41039
2024-11-24T02:55:08,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=41039
2024-11-24T02:55:08,440 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=41039 2024-11-24T02:55:08,455 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:55:08,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:08,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:08,455 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:55:08,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:08,455 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:55:08,455 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:55:08,456 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:55:08,456 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:34923 2024-11-24T02:55:08,458 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:34923 connecting to ZooKeeper ensemble=127.0.0.1:54630 2024-11-24T02:55:08,459 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:08,460 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:08,475 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:349230x0, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:55:08,476 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:349230x0, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:08,476 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:34923-0x1016ac4278c0001 connected 2024-11-24T02:55:08,476 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:55:08,478 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:55:08,479 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:55:08,480 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:55:08,483 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=34923 2024-11-24T02:55:08,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=34923 2024-11-24T02:55:08,484 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=34923 2024-11-24T02:55:08,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=34923 2024-11-24T02:55:08,488 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=34923 2024-11-24T02:55:08,500 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:41039 2024-11-24T02:55:08,500 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:08,507 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:08,507 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:55:08,517 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,518 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:55:08,518 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,41039,1732416908275 from backup master directory 2024-11-24T02:55:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, 
path=/hbase/backup-masters 2024-11-24T02:55:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/backup-masters/7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,528 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:08,528 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:55:08,528 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,532 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/hbase.id] with ID: 262b0af9-d95f-4ac6-b78f-bbe754ed0d59 2024-11-24T02:55:08,532 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/.tmp/hbase.id 2024-11-24T02:55:08,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:55:08,537 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:55:08,538 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/.tmp/hbase.id]:[hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/hbase.id] 2024-11-24T02:55:08,548 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:08,548 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T02:55:08,549 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
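The FSUtils entries above show the cluster ID file (hbase.id) being written to a temporary location under .tmp and then moved to its final path. As a rough, hedged illustration of that write-then-rename pattern on a Hadoop FileSystem (this is not the actual FSUtils implementation; the class and method names below are made up for the example):

```java
import java.io.IOException;
import java.nio.charset.StandardCharsets;
import java.util.UUID;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdFileSketch {

  /** Write clusterId under rootDir via a temporary file, then rename it into place. */
  static void writeClusterId(FileSystem fs, Path rootDir, String clusterId) throws IOException {
    Path idFile = new Path(rootDir, "hbase.id");
    Path tmpFile = new Path(new Path(rootDir, ".tmp"), "hbase.id");

    // 1. Write the content to the temporary location first.
    try (FSDataOutputStream out = fs.create(tmpFile, true)) {
      out.write(clusterId.getBytes(StandardCharsets.UTF_8));
    }

    // 2. Move the temporary file to its target location, so readers never
    //    observe a partially written hbase.id.
    if (!fs.rename(tmpFile, idFile)) {
      throw new IOException("Unable to move " + tmpFile + " to " + idFile);
    }
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical local root dir; the test run above used an hdfs://localhost:<port> path.
    Path rootDir = new Path("file:///tmp/hbase-rootdir-example");
    // newInstance() avoids the shared, cached FileSystem, so closing it here is safe.
    try (FileSystem fs = FileSystem.newInstance(rootDir.toUri(), conf)) {
      writeClusterId(fs, rootDir, UUID.randomUUID().toString());
    }
  }
}
```

The rename step is what makes the publish effectively atomic from a reader's point of view; writing hbase.id in place would risk exposing a truncated file if the writer died mid-write.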
2024-11-24T02:55:08,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,559 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:55:08,564 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:55:08,565 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:55:08,565 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:55:08,566 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:08,571 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:55:08,572 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:55:08,572 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store 2024-11-24T02:55:08,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:55:08,578 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:55:08,578 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:08,578 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:55:08,578 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:08,578 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:08,578 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:55:08,579 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:08,579 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
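The serialized descriptor above spells out each column family of the local 'master:store' region with attributes such as VERSIONS, BLOOMFILTER, IN_MEMORY, DATA_BLOCK_ENCODING and BLOCKSIZE. For readers more used to the client API, the sketch below builds a descriptor with the same kind of settings via TableDescriptorBuilder and ColumnFamilyDescriptorBuilder; the table name is hypothetical, and this is not the code the master itself uses to create its local store region.

```java
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class StoreDescriptorSketch {
  public static void main(String[] args) {
    // "info"-like family: 3 versions, in-memory, ROW_INDEX_V1 encoding,
    // ROWCOL bloom filter, 8 KB blocks (matching the attributes logged above).
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .setBlocksize(8 * 1024)
        .build();

    // "proc"-like family: a single version, ROW bloom filter, 64 KB blocks.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("proc"))
        .setMaxVersions(1)
        .setBloomFilterType(BloomType.ROW)
        .setBlocksize(64 * 1024)
        .build();

    // Hypothetical table name; the real region belongs to the reserved
    // master:store system table, which user code does not create directly.
    TableDescriptor table = TableDescriptorBuilder
        .newBuilder(TableName.valueOf("example", "store_like"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();

    System.out.println(table);
  }
}
```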
2024-11-24T02:55:08,579 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416908578Disabling compacts and flushes for region at 1732416908578Disabling writes for close at 1732416908578Writing region close event to WAL at 1732416908579 (+1 ms)Closed at 1732416908579 2024-11-24T02:55:08,580 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/.initializing 2024-11-24T02:55:08,580 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/WALs/7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,582 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C41039%2C1732416908275, suffix=, logDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/WALs/7c69a60bd8f6,41039,1732416908275, archiveDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/oldWALs, maxLogs=10 2024-11-24T02:55:08,583 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C41039%2C1732416908275.1732416908583 2024-11-24T02:55:08,588 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/WALs/7c69a60bd8f6,41039,1732416908275/7c69a60bd8f6%2C41039%2C1732416908275.1732416908583 2024-11-24T02:55:08,591 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36005:36005),(127.0.0.1/127.0.0.1:39783:39783)] 2024-11-24T02:55:08,596 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:55:08,596 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:08,596 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,596 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,597 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,598 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:55:08,598 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:08,599 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:55:08,600 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,600 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:08,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,601 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:55:08,601 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:08,602 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:55:08,603 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,603 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:08,603 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,604 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,604 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,605 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,605 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,606 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:55:08,607 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:08,608 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:55:08,609 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=847097, jitterRate=0.07713975012302399}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:55:08,609 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416908596Initializing all the Stores at 1732416908597 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416908597Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416908597Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416908597Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416908597Cleaning up temporary data from old regions at 1732416908605 (+8 ms)Region opened successfully at 1732416908609 (+4 ms) 2024-11-24T02:55:08,611 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:55:08,614 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@4d1743e0, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:55:08,615 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:55:08,615 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:55:08,615 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:55:08,615 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:55:08,616 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T02:55:08,616 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T02:55:08,616 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:55:08,618 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T02:55:08,618 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:55:08,622 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:55:08,623 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:55:08,623 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:55:08,633 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:55:08,633 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:55:08,634 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:55:08,643 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:55:08,644 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:55:08,654 DEBUG 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:55:08,656 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:55:08,665 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:55:08,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:08,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:08,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,675 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,676 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,41039,1732416908275, sessionid=0x1016ac4278c0000, setting cluster-up flag (Was=false) 2024-11-24T02:55:08,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,696 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,728 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:55:08,732 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,749 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:08,781 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:55:08,784 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:08,786 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:55:08,789 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:08,790 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:55:08,790 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T02:55:08,790 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,41039,1732416908275 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:55:08,791 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(746): ClusterId : 262b0af9-d95f-4ac6-b78f-bbe754ed0d59 2024-11-24T02:55:08,791 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:55:08,791 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service 
name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:55:08,792 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,796 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:08,796 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:55:08,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416938796 2024-11-24T02:55:08,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:55:08,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:55:08,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:55:08,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:55:08,796 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:55:08,797 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:55:08,797 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,797 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 
'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:55:08,800 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,800 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:55:08,801 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:55:08,801 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:55:08,801 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:55:08,801 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:55:08,802 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416908801,5,FailOnTimeoutGroup] 2024-11-24T02:55:08,802 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:55:08,802 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:55:08,803 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416908802,5,FailOnTimeoutGroup] 2024-11-24T02:55:08,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:55:08,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 
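
The HMaster line above notes that reopening regions with a very high storeFileRefCount stays disabled until hbase.regions.recovery.store.file.ref.count is given a value greater than 0. Below is a minimal Java sketch of setting that key on an HBase Configuration before startup; the class name and the threshold value 3 are illustrative, only the key name comes from the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class StoreFileRefCountConfig {
      public static void main(String[] args) {
        // Standard HBase configuration (picks up hbase-site.xml from the classpath).
        Configuration conf = HBaseConfiguration.create();

        // The master reports this feature disabled because the threshold is <= 0.
        // 3 is an arbitrary example value, not a recommendation from the log.
        conf.setInt("hbase.regions.recovery.store.file.ref.count", 3);

        System.out.println("storeFileRefCount threshold = "
            + conf.getInt("hbase.regions.recovery.store.file.ref.count", 0));
      }
    }
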
2024-11-24T02:55:08,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:55:08,805 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:55:08,806 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:55:08,806 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e 2024-11-24T02:55:08,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:55:08,810 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:55:08,811 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:08,812 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:55:08,813 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:55:08,813 DEBUG [RS:0;7c69a60bd8f6:34923 {}] ipc.AbstractRpcClient(198): 
Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@f4ac395, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:55:08,813 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:55:08,813 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:08,814 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:55:08,815 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:55:08,815 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:08,815 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:55:08,816 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); 
files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:55:08,816 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:08,816 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:55:08,817 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:55:08,817 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:08,818 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:08,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:55:08,818 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740 2024-11-24T02:55:08,819 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740 2024-11-24T02:55:08,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:55:08,820 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:55:08,820 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta 
descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:55:08,822 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:55:08,824 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:55:08,824 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=863064, jitterRate=0.09744308888912201}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:55:08,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416908811Initializing all the Stores at 1732416908812 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416908812Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416908812Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416908812Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416908812Cleaning up temporary data from old regions at 1732416908820 (+8 ms)Region opened successfully at 1732416908824 (+4 ms) 2024-11-24T02:55:08,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:55:08,825 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:55:08,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:55:08,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:55:08,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:55:08,825 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:08,825 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416908825Disabling compacts and flushes for 
region at 1732416908825Disabling writes for close at 1732416908825Writing region close event to WAL at 1732416908825Closed at 1732416908825 2024-11-24T02:55:08,825 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:34923 2024-11-24T02:55:08,825 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:55:08,825 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:55:08,825 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(832): About to register with Master. 2024-11-24T02:55:08,826 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,41039,1732416908275 with port=34923, startcode=1732416908455 2024-11-24T02:55:08,826 DEBUG [RS:0;7c69a60bd8f6:34923 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:55:08,826 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:08,826 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:55:08,826 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:55:08,827 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:55:08,827 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37687, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.6 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:55:08,828 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41039 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,828 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41039 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,828 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:55:08,829 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e 2024-11-24T02:55:08,829 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:46357 2024-11-24T02:55:08,829 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:55:08,833 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, 
state=SyncConnected, path=/hbase/rs 2024-11-24T02:55:08,833 DEBUG [RS:0;7c69a60bd8f6:34923 {}] zookeeper.ZKUtil(111): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,833 WARN [RS:0;7c69a60bd8f6:34923 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:55:08,833 INFO [RS:0;7c69a60bd8f6:34923 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:08,834 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,834 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,34923,1732416908455] 2024-11-24T02:55:08,837 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:55:08,838 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:55:08,839 INFO [RS:0;7c69a60bd8f6:34923 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:55:08,839 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,839 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:55:08,840 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:55:08,840 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 
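
The "Config from master" lines above echo three keys back to the region server: hbase.rootdir, fs.defaultFS and hbase.master.info.port. A small sketch of reading the same keys from a client-side Configuration follows; the fallback defaults passed to get()/getInt() are placeholders for this sketch, not values from this cluster.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class ConfigFromMasterPeek {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // The three keys are the ones echoed in the "Config from master" log lines;
        // the default values supplied here are placeholders.
        String rootDir = conf.get("hbase.rootdir", "file:///tmp/hbase");
        String defaultFs = conf.get("fs.defaultFS", "file:///");
        int masterInfoPort = conf.getInt("hbase.master.info.port", -1);

        System.out.printf("hbase.rootdir=%s fs.defaultFS=%s hbase.master.info.port=%d%n",
            rootDir, defaultFs, masterInfoPort);
      }
    }
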
2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:55:08,840 DEBUG [RS:0;7c69a60bd8f6:34923 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:55:08,841 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,841 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,841 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,841 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 
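
The executor.ExecutorService lines above start one pool per operation type, each described only by corePoolSize and maxPoolSize. As a rough analogue, the JDK ThreadPoolExecutor below shows what those two parameters mean; it does not reproduce HBase's own ExecutorService wrapper, and the 3/3 sizing (as logged for RS_SNAPSHOT_OPERATIONS) plus the task bodies are illustrative.

    import java.util.concurrent.LinkedBlockingQueue;
    import java.util.concurrent.ThreadPoolExecutor;
    import java.util.concurrent.TimeUnit;

    public class ExecutorPoolSketch {
      public static void main(String[] args) throws InterruptedException {
        // corePoolSize=3, maxPoolSize=3, mirroring one of the logged pools.
        ThreadPoolExecutor pool = new ThreadPoolExecutor(
            3, 3,                       // corePoolSize, maximumPoolSize
            60L, TimeUnit.SECONDS,      // keep-alive for idle threads above core size
            new LinkedBlockingQueue<>());

        for (int i = 0; i < 5; i++) {
          final int task = i;
          pool.execute(() ->
              System.out.println("task " + task + " on " + Thread.currentThread().getName()));
        }

        pool.shutdown();
        pool.awaitTermination(10, TimeUnit.SECONDS);
      }
    }
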
2024-11-24T02:55:08,841 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,841 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34923,1732416908455-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:55:08,853 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:55:08,854 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,34923,1732416908455-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,854 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,854 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.Replication(171): 7c69a60bd8f6,34923,1732416908455 started 2024-11-24T02:55:08,866 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:08,866 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,34923,1732416908455, RpcServer on 7c69a60bd8f6/172.17.0.2:34923, sessionid=0x1016ac4278c0001 2024-11-24T02:55:08,866 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:55:08,866 DEBUG [RS:0;7c69a60bd8f6:34923 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,866 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34923,1732416908455' 2024-11-24T02:55:08,866 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:55:08,867 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:55:08,867 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:55:08,867 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:55:08,867 DEBUG [RS:0;7c69a60bd8f6:34923 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,867 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,34923,1732416908455' 2024-11-24T02:55:08,867 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:55:08,868 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:55:08,868 DEBUG [RS:0;7c69a60bd8f6:34923 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:55:08,868 INFO [RS:0;7c69a60bd8f6:34923 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:55:08,868 INFO [RS:0;7c69a60bd8f6:34923 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support 
disabled, not starting space quota manager. 2024-11-24T02:55:08,970 INFO [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34923%2C1732416908455, suffix=, logDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455, archiveDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/oldWALs, maxLogs=32 2024-11-24T02:55:08,970 INFO [RS:0;7c69a60bd8f6:34923 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34923%2C1732416908455.1732416908970 2024-11-24T02:55:08,976 INFO [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416908970 2024-11-24T02:55:08,977 DEBUG [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36005:36005),(127.0.0.1/127.0.0.1:39783:39783)] 2024-11-24T02:55:08,978 DEBUG [7c69a60bd8f6:41039 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:55:08,979 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:08,980 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,34923,1732416908455, state=OPENING 2024-11-24T02:55:08,991 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:55:09,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:09,001 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:09,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:09,002 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:09,002 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:55:09,002 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34923,1732416908455}] 2024-11-24T02:55:09,155 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:55:09,159 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:37343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:55:09,165 INFO 
[RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:55:09,165 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:09,167 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C34923%2C1732416908455.meta, suffix=.meta, logDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455, archiveDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/oldWALs, maxLogs=32 2024-11-24T02:55:09,168 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34923%2C1732416908455.meta.1732416909168.meta 2024-11-24T02:55:09,173 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.meta.1732416909168.meta 2024-11-24T02:55:09,180 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39783:39783),(127.0.0.1/127.0.0.1:36005:36005)] 2024-11-24T02:55:09,184 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:55:09,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:55:09,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:55:09,185 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 
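
The open-region log above shows hbase:meta loading org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from its table descriptor. A hedged sketch of attaching the same coprocessor class to a user table descriptor through the public client API follows; the table name "demo" and family "info" are placeholders, and setCoprocessor(String) is just one way of expressing the coprocessor$1 attribute seen in the descriptor.

    import org.apache.hadoop.hbase.TableName;
    import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
    import org.apache.hadoop.hbase.client.TableDescriptor;
    import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

    public class CoprocessorDescriptorSketch {
      public static void main(String[] args) throws Exception {
        // "demo" and "info" are placeholder names; the coprocessor class name is the one
        // the open-region log reports loading from the hbase:meta descriptor.
        TableDescriptor td = TableDescriptorBuilder
            .newBuilder(TableName.valueOf("demo"))
            .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
            .setCoprocessor("org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint")
            .build();

        System.out.println(td);
      }
    }
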
2024-11-24T02:55:09,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:55:09,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:09,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:55:09,185 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:55:09,186 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:55:09,187 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:55:09,187 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:09,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:09,187 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:55:09,188 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:55:09,188 DEBUG [StoreOpener-1588230740-1 {}] 
storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:09,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:09,188 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:55:09,189 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:55:09,189 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:09,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:09,189 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:55:09,190 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:55:09,190 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:09,190 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 
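
The CompactionConfiguration lines above resolve to minFilesToCompact:3, maxFilesToCompact:10, ratio 1.2 and off-peak ratio 5.0. The sketch below sets the same values through Configuration; note that the key names (hbase.hstore.compaction.*) are assumed from standard HBase configuration and do not appear in the log itself.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class CompactionTuningSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Values mirror the CompactionConfiguration line above; the key names are the
        // usual HBase store-compaction keys and are an assumption of this sketch.
        conf.setInt("hbase.hstore.compaction.min", 3);
        conf.setInt("hbase.hstore.compaction.max", 10);
        conf.setFloat("hbase.hstore.compaction.ratio", 1.2f);
        conf.setFloat("hbase.hstore.compaction.ratio.offpeak", 5.0f);

        System.out.println("compaction ratio = "
            + conf.getFloat("hbase.hstore.compaction.ratio", 0f));
      }
    }
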
2024-11-24T02:55:09,190 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:55:09,191 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740 2024-11-24T02:55:09,192 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740 2024-11-24T02:55:09,193 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:55:09,193 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:55:09,194 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 2024-11-24T02:55:09,195 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:55:09,196 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=879496, jitterRate=0.11833822727203369}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:55:09,196 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:55:09,196 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416909185Writing region info on filesystem at 1732416909185Initializing all the Stores at 1732416909186 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416909186Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416909186Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 
'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416909186Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416909186Cleaning up temporary data from old regions at 1732416909193 (+7 ms)Running coprocessor post-open hooks at 1732416909196 (+3 ms)Region opened successfully at 1732416909196 2024-11-24T02:55:09,197 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416909154 2024-11-24T02:55:09,199 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:55:09,199 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:55:09,200 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:09,200 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,34923,1732416908455, state=OPEN 2024-11-24T02:55:09,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:55:09,246 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:55:09,246 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:09,246 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:09,246 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:09,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:55:09,252 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,34923,1732416908455 in 244 msec 2024-11-24T02:55:09,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:55:09,256 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 426 msec 2024-11-24T02:55:09,257 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, 
state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:09,257 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:55:09,258 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:55:09,258 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,34923,1732416908455, seqNum=-1] 2024-11-24T02:55:09,259 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:55:09,260 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50645, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:55:09,264 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 476 msec 2024-11-24T02:55:09,264 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416909264, completionTime=-1 2024-11-24T02:55:09,264 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:55:09,264 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732416969266 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732417029266 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 1 msec 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41039,1732416908275-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41039,1732416908275-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41039,1732416908275-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:41039, period=300000, unit=MILLISECONDS is enabled. 
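
InitMetaProcedure above reports that it is about to create the 'default' and 'hbase' namespaces. A small client-side sketch that would list them once the master finishes initialization; connecting via whatever hbase-site.xml is on the classpath is an assumption of this sketch.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;
    import org.apache.hadoop.hbase.NamespaceDescriptor;
    import org.apache.hadoop.hbase.client.Admin;
    import org.apache.hadoop.hbase.client.Connection;
    import org.apache.hadoop.hbase.client.ConnectionFactory;

    public class ListNamespacesSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = HBaseConfiguration.create();
        try (Connection conn = ConnectionFactory.createConnection(conf);
             Admin admin = conn.getAdmin()) {
          // After InitMetaProcedure finishes, the 'default' and 'hbase' namespaces
          // from the log above should show up here.
          for (NamespaceDescriptor ns : admin.listNamespaceDescriptors()) {
            System.out.println("namespace: " + ns.getName());
          }
        }
      }
    }
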
2024-11-24T02:55:09,266 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:09,267 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:09,268 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 0.742sec 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41039,1732416908275-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:55:09,270 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41039,1732416908275-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:55:09,273 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:55:09,273 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:55:09,273 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,41039,1732416908275-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 
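
The master lines above report quota support and logging of slow/large requests to the hbase:slowlog system table as disabled. The sketch below turns both on via Configuration; both key names are assumptions based on standard HBase settings and are not shown in the log.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.hbase.HBaseConfiguration;

    public class QuotaAndSlowLogSketch {
      public static void main(String[] args) {
        Configuration conf = HBaseConfiguration.create();

        // Key names are assumed from standard HBase configuration, not taken from the log.
        conf.setBoolean("hbase.quota.enabled", true);                         // MasterQuotaManager
        conf.setBoolean("hbase.regionserver.slowlog.systable.enabled", true); // hbase:slowlog table

        System.out.println("quota enabled = " + conf.getBoolean("hbase.quota.enabled", false));
      }
    }
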
2024-11-24T02:55:09,291 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3cc27ae2, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:55:09,291 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,41039,-1 for getting cluster id 2024-11-24T02:55:09,291 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:55:09,292 DEBUG [HMaster-EventLoopGroup-14-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '262b0af9-d95f-4ac6-b78f-bbe754ed0d59' 2024-11-24T02:55:09,292 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:55:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "262b0af9-d95f-4ac6-b78f-bbe754ed0d59" 2024-11-24T02:55:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@37ab9207, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:55:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,41039,-1] 2024-11-24T02:55:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:55:09,293 DEBUG [RPCClient-NioEventLoopGroup-4-4 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:09,295 INFO [HMaster-EventLoopGroup-14-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60772, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:55:09,296 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@3b74706c, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:55:09,296 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:55:09,297 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,34923,1732416908455, seqNum=-1] 2024-11-24T02:55:09,297 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:55:09,298 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-15-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34530, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:55:09,300 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:09,300 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class 
org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:09,303 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:55:09,303 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching master stub from registry 2024-11-24T02:55:09,304 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.AsyncConnectionImpl(321): The fetched master address is 7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:09,304 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] client.ConnectionUtils(555): The fetched master stub is org.apache.hadoop.hbase.shaded.protobuf.generated.MasterProtos$MasterService$Stub@10dedffc 2024-11-24T02:55:09,304 DEBUG [RPCClient-NioEventLoopGroup-4-5 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=MasterService, sasl=false 2024-11-24T02:55:09,305 INFO [HMaster-EventLoopGroup-14-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60786, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=MasterService 2024-11-24T02:55:09,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] util.TableDescriptorChecker(321): MAX_FILESIZE for table descriptor or "hbase.hregion.max.filesize" (786432) is too small, which might cause over splitting into unmanageable number of regions. 2024-11-24T02:55:09,306 WARN [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] util.TableDescriptorChecker(321): MEMSTORE_FLUSHSIZE for table descriptor or "hbase.hregion.memstore.flush.size" (8192) is too small, which might cause very frequent flushing. 2024-11-24T02:55:09,306 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] master.HMaster$4(2454): Client=jenkins//172.17.0.2 create 'TestLogRolling-testLogRolling', {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:55:09,308 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] procedure2.ProcedureExecutor(1139): Stored pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling 2024-11-24T02:55:09,309 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_PRE_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_PRE_OPERATION 2024-11-24T02:55:09,310 DEBUG [PEWorker-3 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:09,310 INFO [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] master.MasterRpcServices(787): Client=jenkins//172.17.0.2 procedure request for creating table: namespace: "default" qualifier: "TestLogRolling-testLogRolling" procId is: 4 2024-11-24T02:55:09,311 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:55:09,311 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_WRITE_FS_LAYOUT, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_WRITE_FS_LAYOUT 2024-11-24T02:55:09,318 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741835_1011 (size=381) 2024-11-24T02:55:09,319 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741835_1011 (size=381) 2024-11-24T02:55:09,320 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(7572): creating {ENCODED => 768cc1871b22dcaa3f13dc427eba7bc8, NAME => 'TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='TestLogRolling-testLogRolling', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, regionDir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e 2024-11-24T02:55:09,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741836_1012 (size=64) 2024-11-24T02:55:09,327 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741836_1012 (size=64) 2024-11-24T02:55:09,327 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:09,327 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1722): Closing 768cc1871b22dcaa3f13dc427eba7bc8, disabling compactions & flushes 2024-11-24T02:55:09,327 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:09,327 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:09,328 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. after waiting 0 ms 2024-11-24T02:55:09,328 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:09,328 INFO [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 
2024-11-24T02:55:09,328 DEBUG [RegionOpenAndInit-TestLogRolling-testLogRolling-pool-0 {}] regionserver.HRegion(1676): Region close journal for 768cc1871b22dcaa3f13dc427eba7bc8: Waiting for close lock at 1732416909327Disabling compacts and flushes for region at 1732416909327Disabling writes for close at 1732416909328 (+1 ms)Writing region close event to WAL at 1732416909328Closed at 1732416909328 2024-11-24T02:55:09,329 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ADD_TO_META, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ADD_TO_META 2024-11-24T02:55:09,329 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":2,"row":"TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732416909329"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416909329"}]},"ts":"1732416909329"} 2024-11-24T02:55:09,331 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(832): Added 1 regions to meta. 2024-11-24T02:55:09,332 INFO [PEWorker-3 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_ASSIGN_REGIONS, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_ASSIGN_REGIONS 2024-11-24T02:55:09,332 DEBUG [PEWorker-3 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416909332"}]},"ts":"1732416909332"} 2024-11-24T02:55:09,334 INFO [PEWorker-3 {}] hbase.MetaTableAccessor(843): Updated tableName=TestLogRolling-testLogRolling, state=ENABLING in hbase:meta 2024-11-24T02:55:09,334 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, ASSIGN}] 2024-11-24T02:55:09,335 INFO [PEWorker-4 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, ASSIGN 2024-11-24T02:55:09,336 INFO [PEWorker-4 {}] assignment.TransitRegionStateProcedure(269): Starting pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, ASSIGN; state=OFFLINE, location=7c69a60bd8f6,34923,1732416908455; forceNewPlan=false, retain=false 2024-11-24T02:55:09,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:09,359 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:09,487 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=768cc1871b22dcaa3f13dc427eba7bc8, regionState=OPENING, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:09,491 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=5, ppid=4, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, ASSIGN because future has completed 2024-11-24T02:55:09,492 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 768cc1871b22dcaa3f13dc427eba7bc8, server=7c69a60bd8f6,34923,1732416908455}] 2024-11-24T02:55:09,651 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:09,651 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7752): Opening region: {ENCODED => 768cc1871b22dcaa3f13dc427eba7bc8, NAME => 'TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:55:09,651 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,651 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:09,651 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7794): checking encryption for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,651 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(7797): checking classloading for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,653 INFO [StoreOpener-768cc1871b22dcaa3f13dc427eba7bc8-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,654 INFO [StoreOpener-768cc1871b22dcaa3f13dc427eba7bc8-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; 
off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 768cc1871b22dcaa3f13dc427eba7bc8 columnFamilyName info 2024-11-24T02:55:09,654 DEBUG [StoreOpener-768cc1871b22dcaa3f13dc427eba7bc8-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:09,655 INFO [StoreOpener-768cc1871b22dcaa3f13dc427eba7bc8-1 {}] regionserver.HStore(327): Store=768cc1871b22dcaa3f13dc427eba7bc8/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:09,655 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1038): replaying wal for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,656 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,656 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,657 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1048): stopping wal replay for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,657 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1060): Cleaning up temporary data for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,658 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1093): writing seq id for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,661 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:55:09,661 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1114): Opened 768cc1871b22dcaa3f13dc427eba7bc8; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=849859, jitterRate=0.08065171539783478}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:55:09,661 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:09,662 DEBUG 
[RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegion(1006): Region open journal for 768cc1871b22dcaa3f13dc427eba7bc8: Running coprocessor pre-open hook at 1732416909651Writing region info on filesystem at 1732416909651Initializing all the Stores at 1732416909652 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416909652Cleaning up temporary data from old regions at 1732416909657 (+5 ms)Running coprocessor post-open hooks at 1732416909661 (+4 ms)Region opened successfully at 1732416909662 (+1 ms) 2024-11-24T02:55:09,663 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., pid=6, masterSystemTime=1732416909647 2024-11-24T02:55:09,664 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:09,665 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=6}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:09,665 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=5 updating hbase:meta row=768cc1871b22dcaa3f13dc427eba7bc8, regionState=OPEN, openSeqNum=2, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:09,667 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=6, ppid=5, state=RUNNABLE, hasLock=false; OpenRegionProcedure 768cc1871b22dcaa3f13dc427eba7bc8, server=7c69a60bd8f6,34923,1732416908455 because future has completed 2024-11-24T02:55:09,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=6, resume processing ppid=5 2024-11-24T02:55:09,670 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=6, ppid=5, state=SUCCESS, hasLock=false; OpenRegionProcedure 768cc1871b22dcaa3f13dc427eba7bc8, server=7c69a60bd8f6,34923,1732416908455 in 176 msec 2024-11-24T02:55:09,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=5, resume processing ppid=4 2024-11-24T02:55:09,672 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=5, ppid=4, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, ASSIGN in 336 msec 2024-11-24T02:55:09,673 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_UPDATE_DESC_CACHE, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_UPDATE_DESC_CACHE 2024-11-24T02:55:09,674 DEBUG [PEWorker-1 {}] hbase.MetaTableAccessor(964): Put {"totalColumns":1,"row":"TestLogRolling-testLogRolling","families":{"table":[{"qualifier":"state","vlen":2,"tag":[],"timestamp":"1732416909673"}]},"ts":"1732416909673"} 2024-11-24T02:55:09,676 INFO [PEWorker-1 {}] hbase.MetaTableAccessor(843): Updated 
tableName=TestLogRolling-testLogRolling, state=ENABLED in hbase:meta 2024-11-24T02:55:09,677 INFO [PEWorker-1 {}] procedure.CreateTableProcedure(89): pid=4, state=RUNNABLE:CREATE_TABLE_POST_OPERATION, hasLock=true; CreateTableProcedure table=TestLogRolling-testLogRolling execute state=CREATE_TABLE_POST_OPERATION 2024-11-24T02:55:09,679 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=4, state=SUCCESS, hasLock=false; CreateTableProcedure table=TestLogRolling-testLogRolling in 371 msec 2024-11-24T02:55:10,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:10,360 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:10,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,386 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,387 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,388 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,390 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,408 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,411 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,412 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,414 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,919 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:55:10,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,920 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,921 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,922 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,947 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,948 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,949 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,956 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,957 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:10,961 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:11,361 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:11,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:12,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:12,362 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:13,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:13,363 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:13,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(131): Registering adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling 2024-11-24T02:55:13,405 INFO [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(135): Registering RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testLogRolling Metrics about Tables on a single HBase RegionServer 2024-11-24T02:55:13,405 DEBUG [HBase-Metrics2-1 {}] impl.GlobalMetricRegistriesAdapter(153): Removing adapter for the MetricRegistry: RegionServer,sub=TableRequests_Namespace_default_table_TestLogRolling-testCompactionRecordDoesntBlockRolling 2024-11-24T02:55:14,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:14,364 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:14,837 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'hbase:meta' 2024-11-24T02:55:14,838 DEBUG [HBase-Metrics2-1 {}] regionserver.MetricsTableSourceImpl(133): Creating new MetricsTableSourceImpl for table 'TestLogRolling-testLogRolling' 2024-11-24T02:55:15,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:15,365 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:16,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:16,366 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:17,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:17,367 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:18,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:18,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] 
at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:18,908 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:55:18,909 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,910 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,911 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,912 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,936 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,937 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,940 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,941 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:18,943 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:19,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:19,368 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:19,382 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=41039 {}] master.MasterRpcServices(1377): Checking to see if procedure is done pid=4 2024-11-24T02:55:19,383 INFO [RPCClient-NioEventLoopGroup-4-7 {}] client.RawAsyncHBaseAdmin$TableProcedureBiConsumer(2721): Operation: CREATE, Table Name: default:TestLogRolling-testLogRolling completed 2024-11-24T02:55:19,383 DEBUG [Time-limited test {}] hbase.ClientMetaTableAccessor(255): Scanning META starting at row=TestLogRolling-testLogRolling,, stopping at row=TestLogRolling-testLogRolling ,, for max=2147483647 with caching=100 2024-11-24T02:55:19,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2234): Found 1 regions for table TestLogRolling-testLogRolling 2024-11-24T02:55:19,388 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(2240): firstRegionName=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:19,393 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0001', locateType=CURRENT is [region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., hostname=7c69a60bd8f6,34923,1732416908455, seqNum=2] 2024-11-24T02:55:19,409 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:19,409 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:55:19,428 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/5e1c3837f6e84de98d7fbe7577e3a377 is 1080, key is row0001/info:/1732416919395/Put/seqid=0 2024-11-24T02:55:19,435 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741837_1013 (size=12509) 2024-11-24T02:55:19,436 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741837_1013 (size=12509) 2024-11-24T02:55:19,436 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=11 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/5e1c3837f6e84de98d7fbe7577e3a377 2024-11-24T02:55:19,446 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/5e1c3837f6e84de98d7fbe7577e3a377 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/5e1c3837f6e84de98d7fbe7577e3a377 2024-11-24T02:55:19,453 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/5e1c3837f6e84de98d7fbe7577e3a377, entries=7, sequenceid=11, filesize=12.2 K 
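[editor's note] The repeated `WARN util.RecoverLeaseFSUtils(258): Failed invocation ...` stack traces above come from the WAL close path probing, via reflection, whether HDFS has already closed the old WAL file; every probe here fails with `java.io.IOException: Filesystem closed` because the DFSClient behind the FileSystem was shut down first, and the probe is retried roughly once per second (note the 02:55:14, 02:55:15, 02:55:16, ... timestamps). A minimal, hypothetical sketch of that reflective probe-and-retry pattern — not the actual `RecoverLeaseFSUtils` code; class name, timeout and interval are assumptions for illustration — could look like:

```java
import java.lang.reflect.InvocationTargetException;
import java.lang.reflect.Method;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Minimal sketch of the reflective isFileClosed probe visible in the traces
 * above. NOT the real RecoverLeaseFSUtils implementation; names, timeout and
 * retry interval are illustrative assumptions.
 */
public final class IsFileClosedProbe {

  /** Returns true once the filesystem reports the file as closed. */
  static boolean waitUntilClosed(FileSystem fs, Path wal, long timeoutMs)
      throws InterruptedException {
    // isFileClosed is looked up reflectively so the caller still works against
    // FileSystem implementations that do not expose the method at all.
    Method isFileClosed;
    try {
      isFileClosed = fs.getClass().getMethod("isFileClosed", Path.class);
    } catch (NoSuchMethodException e) {
      return false; // no probe available on this FileSystem
    }
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      try {
        if ((Boolean) isFileClosed.invoke(fs, wal)) {
          return true;
        }
      } catch (InvocationTargetException | IllegalAccessException e) {
        // e.g. "java.io.IOException: Filesystem closed" wrapped in an
        // InvocationTargetException, exactly as in the WARN lines above;
        // log it and keep retrying until the deadline passes.
      }
      Thread.sleep(1000L); // roughly matches the one-second cadence in the log
    }
    return false;
  }

  private IsFileClosedProbe() {}
}
```

In this log the loop can never succeed, because the underlying client is already closed, so the same warning presumably repeats until the writer-close task gives up or the mini-cluster JVM exits.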
2024-11-24T02:55:19,454 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=18.91 KB/19368 for 768cc1871b22dcaa3f13dc427eba7bc8 in 45ms, sequenceid=11, compaction requested=false 2024-11-24T02:55:19,455 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:19,465 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:19,465 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-24T02:55:19,470 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/f162087c611b49f58aa65e0f563b7459 is 1080, key is row0008/info:/1732416919410/Put/seqid=0 2024-11-24T02:55:19,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741838_1014 (size=25453) 2024-11-24T02:55:19,477 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741838_1014 (size=25453) 2024-11-24T02:55:19,478 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=33 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/f162087c611b49f58aa65e0f563b7459 2024-11-24T02:55:19,484 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/f162087c611b49f58aa65e0f563b7459 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459 2024-11-24T02:55:19,490 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459, entries=19, sequenceid=33, filesize=24.9 K 2024-11-24T02:55:19,491 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=6.30 KB/6456 for 768cc1871b22dcaa3f13dc427eba7bc8 in 26ms, sequenceid=33, compaction requested=false 2024-11-24T02:55:19,491 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:19,492 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=37.1 K, sizeToCheck=16.0 K 2024-11-24T02:55:19,492 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:19,492 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459 because midkey is the same as first or last row 2024-11-24T02:55:20,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:20,370 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:21,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:21,372 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:21,487 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:21,487 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:55:21,495 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/be9ce91f72ef4725a0dd03be18ead20d is 1080, key is row0027/info:/1732416919467/Put/seqid=0 2024-11-24T02:55:21,501 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741839_1015 (size=12509) 2024-11-24T02:55:21,502 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741839_1015 (size=12509) 2024-11-24T02:55:21,503 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=43 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/be9ce91f72ef4725a0dd03be18ead20d 2024-11-24T02:55:21,509 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/be9ce91f72ef4725a0dd03be18ead20d as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/be9ce91f72ef4725a0dd03be18ead20d 2024-11-24T02:55:21,514 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/be9ce91f72ef4725a0dd03be18ead20d, entries=7, sequenceid=43, filesize=12.2 K 2024-11-24T02:55:21,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 768cc1871b22dcaa3f13dc427eba7bc8 in 28ms, sequenceid=43, compaction requested=true 2024-11-24T02:55:21,515 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:21,515 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:21,515 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=49.3 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,515 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,515 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459 because midkey is the same as first or last row 2024-11-24T02:55:21,515 DEBUG 
[MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 768cc1871b22dcaa3f13dc427eba7bc8:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:21,515 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:21,515 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:21,515 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:21,517 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 50471 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:21,517 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 768cc1871b22dcaa3f13dc427eba7bc8/info is initiating minor compaction (all files) 2024-11-24T02:55:21,517 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 768cc1871b22dcaa3f13dc427eba7bc8/info in TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:21,517 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/5e1c3837f6e84de98d7fbe7577e3a377, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/be9ce91f72ef4725a0dd03be18ead20d] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp, totalSize=49.3 K 2024-11-24T02:55:21,518 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5e1c3837f6e84de98d7fbe7577e3a377, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=11, earliestPutTs=1732416919395 2024-11-24T02:55:21,518 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting f162087c611b49f58aa65e0f563b7459, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=33, earliestPutTs=1732416919410 2024-11-24T02:55:21,519 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting be9ce91f72ef4725a0dd03be18ead20d, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732416919467 2024-11-24T02:55:21,519 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/66c2fe292b9f4c59a028c2682fc5d2af is 1080, key is row0034/info:/1732416921490/Put/seqid=0 
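[editor's note] The compaction-selection DEBUG lines above show the exploring policy picking the three store files of 12509, 25453 and 12509 bytes (50471 bytes total) for a minor compaction. A simplified, stand-alone illustration of the kind of size-ratio check such a policy performs — not HBase's actual ExploringCompactionPolicy, and the 1.2 ratio is an assumed default — is:

```java
import java.util.List;

/**
 * Illustrative (not HBase's actual ExploringCompactionPolicy) check that a
 * candidate set of store-file sizes is "well proportioned": no single file may
 * be larger than the sum of the others times the ratio.
 */
final class RatioCheckSketch {
  static boolean filesInRatio(List<Long> sizes, double ratio) {
    if (sizes.size() < 2) {
      return true;
    }
    long total = sizes.stream().mapToLong(Long::longValue).sum();
    for (long size : sizes) {
      if (size > (total - size) * ratio) {
        return false; // one file dominates the candidate set
      }
    }
    return true;
  }

  public static void main(String[] args) {
    // The three files selected above: 12509 + 25453 + 12509 bytes = 50471.
    System.out.println(filesInRatio(List.of(12509L, 25453L, 12509L), 1.2)); // true
  }
}
```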
2024-11-24T02:55:21,524 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741840_1016 (size=16817) 2024-11-24T02:55:21,525 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741840_1016 (size=16817) 2024-11-24T02:55:21,526 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=57 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/66c2fe292b9f4c59a028c2682fc5d2af 2024-11-24T02:55:21,532 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/66c2fe292b9f4c59a028c2682fc5d2af as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/66c2fe292b9f4c59a028c2682fc5d2af 2024-11-24T02:55:21,533 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 768cc1871b22dcaa3f13dc427eba7bc8#info#compaction#59 average throughput is 16.93 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:21,534 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/810ae80d8cc04642a9942b3c134d3a04 is 1080, key is row0001/info:/1732416919395/Put/seqid=0 2024-11-24T02:55:21,538 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/66c2fe292b9f4c59a028c2682fc5d2af, entries=11, sequenceid=57, filesize=16.4 K 2024-11-24T02:55:21,539 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741841_1017 (size=40670) 2024-11-24T02:55:21,539 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 768cc1871b22dcaa3f13dc427eba7bc8 in 24ms, sequenceid=57, compaction requested=false 2024-11-24T02:55:21,540 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:21,540 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=65.7 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,540 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,540 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741841_1017 (size=40670) 2024-11-24T02:55:21,540 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459 because midkey is the same as first or last row 2024-11-24T02:55:21,541 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:21,541 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:21,546 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/e242c2fd112a449ca8a1a84388cba489 is 1080, key is row0045/info:/1732416921516/Put/seqid=0 2024-11-24T02:55:21,547 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/810ae80d8cc04642a9942b3c134d3a04 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 2024-11-24T02:55:21,554 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 768cc1871b22dcaa3f13dc427eba7bc8/info of 768cc1871b22dcaa3f13dc427eba7bc8 into 810ae80d8cc04642a9942b3c134d3a04(size=39.7 K), total size for store is 56.1 K. This selection was in queue for 0sec, and took 0sec to execute. 
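[editor's note] Both the flushes and the compaction above write their output under the region's `.tmp` directory first and only then "commit" it into the store directory (`regionserver.HRegionFileSystem(442): Committing ... as ...`). A minimal sketch of that write-then-rename pattern using the plain Hadoop `FileSystem` API — illustrative only, not the HBase internals; the paths are made up — is:

```java
import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

/**
 * Illustrative write-then-commit pattern: data is written to a temporary
 * location and only made visible by a rename, so readers never observe a
 * half-written store file. Paths below are hypothetical.
 */
public final class TmpCommitExample {
  public static void main(String[] args) throws IOException {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    Path tmp = new Path("/example/region/.tmp/info/hfile-in-progress");
    Path dst = new Path("/example/region/info/hfile-final");

    // 1) Write the complete file under .tmp.
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.writeBytes("example cell data");
    }

    // 2) Commit: a rename within one HDFS filesystem is atomic, so the
    //    destination either does not exist or is complete.
    fs.mkdirs(dst.getParent());
    if (!fs.rename(tmp, dst)) {
      throw new IOException("commit failed: " + tmp + " -> " + dst);
    }
  }
}
```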
2024-11-24T02:55:21,554 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:21,554 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., storeName=768cc1871b22dcaa3f13dc427eba7bc8/info, priority=13, startTime=1732416921515; duration=0sec 2024-11-24T02:55:21,554 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,554 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,554 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 because midkey is the same as first or last row 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741842_1018 (size=16817) 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 because midkey is the same as first or last row 2024-11-24T02:55:21,555 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741842_1018 (size=16817) 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=56.1 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 because midkey is the same as first or last row 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:21,555 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 768cc1871b22dcaa3f13dc427eba7bc8:info 2024-11-24T02:55:21,555 INFO [MemStoreFlusher.0 {}] 
regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=71 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/e242c2fd112a449ca8a1a84388cba489 2024-11-24T02:55:21,563 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/e242c2fd112a449ca8a1a84388cba489 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/e242c2fd112a449ca8a1a84388cba489 2024-11-24T02:55:21,569 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/e242c2fd112a449ca8a1a84388cba489, entries=11, sequenceid=71, filesize=16.4 K 2024-11-24T02:55:21,570 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 768cc1871b22dcaa3f13dc427eba7bc8 in 28ms, sequenceid=71, compaction requested=true 2024-11-24T02:55:21,570 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:21,570 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=72.6 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,570 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,570 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 because midkey is the same as first or last row 2024-11-24T02:55:21,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 768cc1871b22dcaa3f13dc427eba7bc8:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:21,570 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:21,570 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:21,571 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 74304 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:21,571 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 768cc1871b22dcaa3f13dc427eba7bc8/info is initiating minor compaction (all files) 2024-11-24T02:55:21,571 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 768cc1871b22dcaa3f13dc427eba7bc8/info in TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 
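[editor's note] The split-policy DEBUG lines interleaved with the flushes and compactions compare the total store size against a threshold (`sizeToCheck=16.0 K` here, with `regionsWithCommonTable=1`) and then refuse to split because the candidate midkey equals the first or last row. A rough, illustrative version of those two checks — the cubic threshold formula, its constants, and the row keys below are assumptions, not the exact IncreasingToUpperBoundRegionSplitPolicy code — might look like:

```java
import java.util.Arrays;

/**
 * Rough sketch of the two split checks visible in the DEBUG lines: a size
 * threshold that grows with the number of regions of the table, and a midkey
 * sanity check. Formula and constants are illustrative assumptions.
 */
final class SplitChecksSketch {

  /** Threshold grows cubically with the region count, capped at the max file size. */
  static long sizeToCheck(int regionsWithCommonTable, long initialSize, long desiredMaxFileSize) {
    if (regionsWithCommonTable == 0) {
      return desiredMaxFileSize;
    }
    long n = regionsWithCommonTable;
    return Math.min(desiredMaxFileSize, initialSize * n * n * n);
  }

  /** A store file cannot be split if its midkey equals its first or last key. */
  static boolean canSplitAtMidkey(byte[] firstKey, byte[] midKey, byte[] lastKey) {
    return !Arrays.equals(midKey, firstKey) && !Arrays.equals(midKey, lastKey);
  }

  public static void main(String[] args) {
    long threshold = sizeToCheck(1, 16 * 1024, 10L << 30); // assumed 16 KiB base, 10 GiB cap
    long storeSize = 57_446;                               // ~56.1 KiB, as in the log
    System.out.println("should split: " + (storeSize > threshold));
    // Row keys are made up for illustration; equal first and mid keys mean "cannot split".
    System.out.println("can split at midkey: "
        + canSplitAtMidkey("row0001".getBytes(), "row0001".getBytes(), "row0062".getBytes()));
  }
}
```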
2024-11-24T02:55:21,571 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/66c2fe292b9f4c59a028c2682fc5d2af, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/e242c2fd112a449ca8a1a84388cba489] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp, totalSize=72.6 K 2024-11-24T02:55:21,572 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 810ae80d8cc04642a9942b3c134d3a04, keycount=33, bloomtype=ROW, size=39.7 K, encoding=NONE, compression=NONE, seqNum=43, earliestPutTs=1732416919395 2024-11-24T02:55:21,572 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 66c2fe292b9f4c59a028c2682fc5d2af, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=57, earliestPutTs=1732416921490 2024-11-24T02:55:21,572 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting e242c2fd112a449ca8a1a84388cba489, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1732416921516 2024-11-24T02:55:21,582 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 768cc1871b22dcaa3f13dc427eba7bc8#info#compaction#61 average throughput is 28.22 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:21,583 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/28c11755a7234c588d09a929a16beca9 is 1080, key is row0001/info:/1732416919395/Put/seqid=0 2024-11-24T02:55:21,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741843_1019 (size=64535) 2024-11-24T02:55:21,588 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741843_1019 (size=64535) 2024-11-24T02:55:21,594 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/28c11755a7234c588d09a929a16beca9 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 2024-11-24T02:55:21,600 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 768cc1871b22dcaa3f13dc427eba7bc8/info of 768cc1871b22dcaa3f13dc427eba7bc8 into 28c11755a7234c588d09a929a16beca9(size=63.0 K), total size for store is 63.0 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:21,600 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., storeName=768cc1871b22dcaa3f13dc427eba7bc8/info, priority=13, startTime=1732416921570; duration=0sec 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 because midkey is the same as first or last row 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 because midkey is the same as first or last row 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=63.0 K, sizeToCheck=16.0 K 2024-11-24T02:55:21,600 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:21,601 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 because midkey is the same as first or last row 2024-11-24T02:55:21,601 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:21,601 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 768cc1871b22dcaa3f13dc427eba7bc8:info 2024-11-24T02:55:22,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:22,373 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:23,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:23,374 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:23,566 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,566 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=10.51 KB heapSize=11.50 KB 2024-11-24T02:55:23,572 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/31ab0e66d29f4c8cb3b747db7db7922b is 1080, key is row0056/info:/1732416921543/Put/seqid=0 2024-11-24T02:55:23,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741844_1020 (size=15740) 2024-11-24T02:55:23,577 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741844_1020 (size=15740) 2024-11-24T02:55:23,578 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=10.51 KB at sequenceid=86 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/31ab0e66d29f4c8cb3b747db7db7922b 2024-11-24T02:55:23,584 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/31ab0e66d29f4c8cb3b747db7db7922b as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/31ab0e66d29f4c8cb3b747db7db7922b 2024-11-24T02:55:23,590 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/31ab0e66d29f4c8cb3b747db7db7922b, entries=10, sequenceid=86, filesize=15.4 K 2024-11-24T02:55:23,591 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~10.51 KB/10760, heapSize ~11.48 KB/11760, currentSize=9.46 KB/9684 for 768cc1871b22dcaa3f13dc427eba7bc8 in 25ms, sequenceid=86, compaction requested=false 2024-11-24T02:55:23,591 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:23,591 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=78.4 K, sizeToCheck=16.0 K 2024-11-24T02:55:23,591 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:23,591 DEBUG 
[MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 because midkey is the same as first or last row 2024-11-24T02:55:23,593 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,593 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:23,597 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/3f4c66bbeb794563b8ff2414103c3f15 is 1080, key is row0066/info:/1732416923568/Put/seqid=0 2024-11-24T02:55:23,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741845_1021 (size=16817) 2024-11-24T02:55:23,602 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741845_1021 (size=16817) 2024-11-24T02:55:23,602 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=100 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/3f4c66bbeb794563b8ff2414103c3f15 2024-11-24T02:55:23,609 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/3f4c66bbeb794563b8ff2414103c3f15 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/3f4c66bbeb794563b8ff2414103c3f15 2024-11-24T02:55:23,615 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/3f4c66bbeb794563b8ff2414103c3f15, entries=11, sequenceid=100, filesize=16.4 K 2024-11-24T02:55:23,616 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 768cc1871b22dcaa3f13dc427eba7bc8 in 23ms, sequenceid=100, compaction requested=true 2024-11-24T02:55:23,617 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:23,617 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=94.8 K, sizeToCheck=16.0 K 2024-11-24T02:55:23,617 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:23,617 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 because midkey is the same as first or last row 2024-11-24T02:55:23,617 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 768cc1871b22dcaa3f13dc427eba7bc8:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:23,617 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:23,617 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:23,617 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:23,618 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 97092 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:23,618 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 768cc1871b22dcaa3f13dc427eba7bc8/info is initiating minor compaction (all files) 2024-11-24T02:55:23,618 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 768cc1871b22dcaa3f13dc427eba7bc8/info in TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 
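Note on the compaction-selection entries above: the ExploringCompactionPolicy line reports which store files were picked for the minor compaction. As a rough illustration of the ratio idea behind such selection (no chosen file should be much larger than the combined size of the files compacted alongside it, which bounds rewrite amplification), here is a simplified standalone sketch. The RATIO value and method names are assumptions; the real policy also enforces min/max file counts and fallback rules, so its choices, like the three-file selection logged here, will not always match this toy version.

    import java.util.ArrayList;
    import java.util.Arrays;
    import java.util.List;

    /**
     * Simplified ratio-based selection: consider contiguous runs of store-file
     * sizes and keep only runs in which no file is bigger than RATIO times the
     * sum of the other files in the run. Among valid runs, prefer more files,
     * then a smaller total size.
     */
    public class CompactionSelectionSketch {
        static final double RATIO = 1.2;   // assumed value, for illustration only
        static final int MIN_FILES = 2;

        static List<Long> select(List<Long> sizes) {
            List<Long> best = new ArrayList<>();
            long bestTotal = Long.MAX_VALUE;
            for (int start = 0; start < sizes.size(); start++) {
                for (int end = start + MIN_FILES; end <= sizes.size(); end++) {
                    List<Long> candidate = sizes.subList(start, end);
                    if (!withinRatio(candidate)) {
                        continue;
                    }
                    long total = candidate.stream().mapToLong(Long::longValue).sum();
                    if (candidate.size() > best.size()
                            || (candidate.size() == best.size() && total < bestTotal)) {
                        best = new ArrayList<>(candidate);
                        bestTotal = total;
                    }
                }
            }
            return best;
        }

        // A run is acceptable when every file is at most RATIO times the sum of
        // the other files in the run.
        static boolean withinRatio(List<Long> candidate) {
            long total = candidate.stream().mapToLong(Long::longValue).sum();
            for (long size : candidate) {
                if (size > RATIO * (total - size)) {
                    return false;
                }
            }
            return true;
        }

        public static void main(String[] args) {
            // Illustrative store-file sizes in KB.
            List<Long> sizes = Arrays.asList(40L, 12L, 15L, 11L);
            System.out.println("selected: " + select(sizes));
        }
    }
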
2024-11-24T02:55:23,618 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/31ab0e66d29f4c8cb3b747db7db7922b, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/3f4c66bbeb794563b8ff2414103c3f15] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp, totalSize=94.8 K 2024-11-24T02:55:23,619 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 28c11755a7234c588d09a929a16beca9, keycount=55, bloomtype=ROW, size=63.0 K, encoding=NONE, compression=NONE, seqNum=71, earliestPutTs=1732416919395 2024-11-24T02:55:23,619 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 31ab0e66d29f4c8cb3b747db7db7922b, keycount=10, bloomtype=ROW, size=15.4 K, encoding=NONE, compression=NONE, seqNum=86, earliestPutTs=1732416921543 2024-11-24T02:55:23,619 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 3f4c66bbeb794563b8ff2414103c3f15, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732416923568 2024-11-24T02:55:23,621 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/b762a624c8c141f6af4fc75f8c6cf423 is 1080, key is row0077/info:/1732416923594/Put/seqid=0 2024-11-24T02:55:23,640 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741846_1022 (size=16817) 2024-11-24T02:55:23,641 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741846_1022 (size=16817) 2024-11-24T02:55:23,641 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=114 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/b762a624c8c141f6af4fc75f8c6cf423 2024-11-24T02:55:23,643 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 768cc1871b22dcaa3f13dc427eba7bc8#info#compaction#65 average throughput is 26.00 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:23,643 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/7380b522f06e4a91beb0a8b0153fb242 is 1080, key is row0001/info:/1732416919395/Put/seqid=0 2024-11-24T02:55:23,648 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/b762a624c8c141f6af4fc75f8c6cf423 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/b762a624c8c141f6af4fc75f8c6cf423 2024-11-24T02:55:23,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741847_1023 (size=87327) 2024-11-24T02:55:23,654 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741847_1023 (size=87327) 2024-11-24T02:55:23,654 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/b762a624c8c141f6af4fc75f8c6cf423, entries=11, sequenceid=114, filesize=16.4 K 2024-11-24T02:55:23,656 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 768cc1871b22dcaa3f13dc427eba7bc8 in 39ms, sequenceid=114, compaction requested=false 2024-11-24T02:55:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=111.2 K, sizeToCheck=16.0 K 2024-11-24T02:55:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:23,656 DEBUG [MemStoreFlusher.0 {}] regionserver.StoreUtils(137): cannot split hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 because midkey is the same as first or last row 2024-11-24T02:55:23,662 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/7380b522f06e4a91beb0a8b0153fb242 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242 2024-11-24T02:55:23,672 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 768cc1871b22dcaa3f13dc427eba7bc8/info of 768cc1871b22dcaa3f13dc427eba7bc8 into 7380b522f06e4a91beb0a8b0153fb242(size=85.3 K), total size for store is 101.7 K. 
This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 768cc1871b22dcaa3f13dc427eba7bc8: 2024-11-24T02:55:23,672 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., storeName=768cc1871b22dcaa3f13dc427eba7bc8/info, priority=13, startTime=1732416923617; duration=0sec 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.ConstantSizeRegionSplitPolicy(101): Should split because region size is big enough sumSize=101.7 K, sizeToCheck=16.0 K 2024-11-24T02:55:23,672 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.IncreasingToUpperBoundRegionSplitPolicy(85): regionsWithCommonTable=1 2024-11-24T02:55:23,673 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit(239): Splitting TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:23,674 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:23,674 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 768cc1871b22dcaa3f13dc427eba7bc8:info 2024-11-24T02:55:23,675 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41039 {}] assignment.AssignmentManager(1363): Split request from 7c69a60bd8f6,34923,1732416908455, parent={ENCODED => 768cc1871b22dcaa3f13dc427eba7bc8, NAME => 'TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.', STARTKEY => '', ENDKEY => ''}, splitKey=row0062 2024-11-24T02:55:23,680 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41039 {}] assignment.SplitTableRegionProcedure(223): Splittable=true state=OPEN, location=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:23,684 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=41039 {}] procedure2.ProcedureExecutor(1139): Stored pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=768cc1871b22dcaa3f13dc427eba7bc8, daughterA=2eba1d2bd66767e0fb78fd29bbe1d40d, daughterB=3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:23,685 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure 
table=TestLogRolling-testLogRolling, parent=768cc1871b22dcaa3f13dc427eba7bc8, daughterA=2eba1d2bd66767e0fb78fd29bbe1d40d, daughterB=3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:23,685 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=768cc1871b22dcaa3f13dc427eba7bc8, daughterA=2eba1d2bd66767e0fb78fd29bbe1d40d, daughterB=3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:23,685 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=7, state=RUNNABLE:SPLIT_TABLE_REGION_PREPARE, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=768cc1871b22dcaa3f13dc427eba7bc8, daughterA=2eba1d2bd66767e0fb78fd29bbe1d40d, daughterB=3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:23,693 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, UNASSIGN}] 2024-11-24T02:55:23,694 INFO [PEWorker-3 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, UNASSIGN 2024-11-24T02:55:23,696 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=768cc1871b22dcaa3f13dc427eba7bc8, regionState=CLOSING, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:23,698 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=8, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_CLOSE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, UNASSIGN because future has completed 2024-11-24T02:55:23,699 DEBUG [PEWorker-4 {}] assignment.TransitRegionStateProcedure(375): Close region: isSplit: true: evictOnSplit: true: evictOnClose: false 2024-11-24T02:55:23,699 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 768cc1871b22dcaa3f13dc427eba7bc8, server=7c69a60bd8f6,34923,1732416908455}] 2024-11-24T02:55:23,857 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(122): Close 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,857 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(136): Unassign region: split region: true: evictCache: true 2024-11-24T02:55:23,858 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1722): Closing 768cc1871b22dcaa3f13dc427eba7bc8, disabling compactions & flushes 2024-11-24T02:55:23,858 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 
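Note on the split request above: it carries splitKey=row0062, and the earlier ConstantSizeRegionSplitPolicy / StoreUtils lines show the two checks that precede it: the store must exceed a size threshold, and the candidate split point (roughly the middle key of the largest store file) must differ from that file's first and last row, otherwise the region cannot be split yet. The sketch below is a toy model of that decision only; the StoreFileInfo record is an invented stand-in for real store-file metadata, and the example values are loosely borrowed from the log for flavor.

    import java.util.Comparator;
    import java.util.List;
    import java.util.Optional;

    /**
     * Toy split decision: split only when the total store size passes a
     * threshold AND the midkey of the largest file is a usable split point
     * (not equal to the file's first or last row).
     */
    public class SplitDecisionSketch {

        record StoreFileInfo(String firstRow, String midRow, String lastRow, long sizeBytes) {}

        static Optional<String> chooseSplitPoint(List<StoreFileInfo> files, long sizeToCheck) {
            long sumSize = files.stream().mapToLong(StoreFileInfo::sizeBytes).sum();
            if (sumSize <= sizeToCheck) {
                return Optional.empty();   // region is still small enough
            }
            // "Should split because region size is big enough sumSize=..., sizeToCheck=..."
            StoreFileInfo largest = files.stream()
                    .max(Comparator.comparingLong(StoreFileInfo::sizeBytes))
                    .orElseThrow();
            // "cannot split ... because midkey is the same as first or last row"
            if (largest.midRow().equals(largest.firstRow())
                    || largest.midRow().equals(largest.lastRow())) {
                return Optional.empty();
            }
            return Optional.of(largest.midRow());
        }

        public static void main(String[] args) {
            List<StoreFileInfo> files = List.of(
                    new StoreFileInfo("row0001", "row0062", "row0087", 87_327L),
                    new StoreFileInfo("row0077", "row0077", "row0087", 16_817L));
            // 16 KB threshold, as in the sizeToCheck=16.0 K lines.
            System.out.println(chooseSplitPoint(files, 16L * 1024));
        }
    }
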
2024-11-24T02:55:23,858 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:23,858 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. after waiting 0 ms 2024-11-24T02:55:23,858 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:23,858 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(2902): Flushing 768cc1871b22dcaa3f13dc427eba7bc8 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-24T02:55:23,864 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/7cf3c6cc30994cfbb8a71a3174766d97 is 1080, key is row0088/info:/1732416923618/Put/seqid=0 2024-11-24T02:55:23,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741848_1024 (size=14663) 2024-11-24T02:55:23,869 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741848_1024 (size=14663) 2024-11-24T02:55:23,869 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=127 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/7cf3c6cc30994cfbb8a71a3174766d97 2024-11-24T02:55:23,875 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/.tmp/info/7cf3c6cc30994cfbb8a71a3174766d97 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7cf3c6cc30994cfbb8a71a3174766d97 2024-11-24T02:55:23,879 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7cf3c6cc30994cfbb8a71a3174766d97, entries=9, sequenceid=127, filesize=14.3 K 2024-11-24T02:55:23,880 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 768cc1871b22dcaa3f13dc427eba7bc8 in 22ms, sequenceid=127, compaction requested=true 2024-11-24T02:55:23,882 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/5e1c3837f6e84de98d7fbe7577e3a377, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/be9ce91f72ef4725a0dd03be18ead20d, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/66c2fe292b9f4c59a028c2682fc5d2af, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/e242c2fd112a449ca8a1a84388cba489, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/31ab0e66d29f4c8cb3b747db7db7922b, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/3f4c66bbeb794563b8ff2414103c3f15] to archive 2024-11-24T02:55:23,882 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
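Note on the StoreCloser/HFileArchiver entries: each obsolete store file is moved from the region's data directory to a parallel location under archive/, preserving the table/region/family layout rather than deleting it outright. The following is a minimal sketch of that "mirror the relative path under an archive root, then rename" step using the Hadoop FileSystem API; the class name and directory layout are only an approximation of what the log shows, not HBase's HFileArchiver.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    import java.io.IOException;
    import java.util.List;

    /**
     * Minimal "archive instead of delete" sketch: each obsolete file is moved
     * to the same relative location under an archive root so it can still be
     * found later. Directory names are illustrative.
     */
    public class ArchiveFilesSketch {

        static void archive(FileSystem fs, Path dataRoot, Path archiveRoot,
                            List<Path> obsoleteFiles) throws IOException {
            String dataPrefix = Path.getPathWithoutSchemeAndAuthority(dataRoot).toString();
            for (Path file : obsoleteFiles) {
                String filePath = Path.getPathWithoutSchemeAndAuthority(file).toString();
                if (!filePath.startsWith(dataPrefix + "/")) {
                    throw new IOException(file + " is not under " + dataRoot);
                }
                // Rebuild the same relative path under the archive root.
                Path target = new Path(archiveRoot, filePath.substring(dataPrefix.length() + 1));
                fs.mkdirs(target.getParent());
                if (!fs.rename(file, target)) {
                    throw new IOException("Failed to archive " + file + " to " + target);
                }
            }
        }

        public static void main(String[] args) throws IOException {
            FileSystem fs = FileSystem.getLocal(new Configuration());
            Path dataRoot = new Path("/tmp/archive-sketch/data");
            Path archiveRoot = new Path("/tmp/archive-sketch/archive");
            Path hfile = new Path(dataRoot, "default/SomeTable/region1/info/oldfile");
            fs.mkdirs(hfile.getParent());
            fs.create(hfile, true).close();
            archive(fs, dataRoot, archiveRoot, List.of(hfile));
            System.out.println("archived? "
                    + fs.exists(new Path(archiveRoot, "default/SomeTable/region1/info/oldfile")));
        }
    }
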
2024-11-24T02:55:23,884 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/5e1c3837f6e84de98d7fbe7577e3a377 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/5e1c3837f6e84de98d7fbe7577e3a377 2024-11-24T02:55:23,885 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/f162087c611b49f58aa65e0f563b7459 2024-11-24T02:55:23,886 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/810ae80d8cc04642a9942b3c134d3a04 2024-11-24T02:55:23,887 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/be9ce91f72ef4725a0dd03be18ead20d to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/be9ce91f72ef4725a0dd03be18ead20d 2024-11-24T02:55:23,888 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/66c2fe292b9f4c59a028c2682fc5d2af to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/66c2fe292b9f4c59a028c2682fc5d2af 2024-11-24T02:55:23,890 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 to 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/28c11755a7234c588d09a929a16beca9 2024-11-24T02:55:23,891 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/e242c2fd112a449ca8a1a84388cba489 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/e242c2fd112a449ca8a1a84388cba489 2024-11-24T02:55:23,893 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/31ab0e66d29f4c8cb3b747db7db7922b to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/31ab0e66d29f4c8cb3b747db7db7922b 2024-11-24T02:55:23,894 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/3f4c66bbeb794563b8ff2414103c3f15 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/3f4c66bbeb794563b8ff2414103c3f15 2024-11-24T02:55:23,901 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=1 2024-11-24T02:55:23,902 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 2024-11-24T02:55:23,902 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] regionserver.HRegion(1676): Region close journal for 768cc1871b22dcaa3f13dc427eba7bc8: Waiting for close lock at 1732416923858Running coprocessor pre-close hooks at 1732416923858Disabling compacts and flushes for region at 1732416923858Disabling writes for close at 1732416923858Obtaining lock to block concurrent updates at 1732416923858Preparing flush snapshotting stores in 768cc1871b22dcaa3f13dc427eba7bc8 at 1732416923858Finished memstore snapshotting TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., syncing WAL and waiting on mvcc, flushsize=dataSize=9684, getHeapSize=10608, getOffHeapSize=0, getCellsCount=9 at 1732416923859 (+1 ms)Flushing stores of TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 
at 1732416923860 (+1 ms)Flushing 768cc1871b22dcaa3f13dc427eba7bc8/info: creating writer at 1732416923860Flushing 768cc1871b22dcaa3f13dc427eba7bc8/info: appending metadata at 1732416923863 (+3 ms)Flushing 768cc1871b22dcaa3f13dc427eba7bc8/info: closing flushed file at 1732416923863Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@3bf5054e: reopening flushed file at 1732416923874 (+11 ms)Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 768cc1871b22dcaa3f13dc427eba7bc8 in 22ms, sequenceid=127, compaction requested=true at 1732416923880 (+6 ms)Writing region close event to WAL at 1732416923897 (+17 ms)Running coprocessor post-close hooks at 1732416923902 (+5 ms)Closed at 1732416923902 2024-11-24T02:55:23,904 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION, pid=9}] handler.UnassignRegionHandler(157): Closed 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,905 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=8 updating hbase:meta row=768cc1871b22dcaa3f13dc427eba7bc8, regionState=CLOSED 2024-11-24T02:55:23,907 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=9, ppid=8, state=RUNNABLE, hasLock=false; CloseRegionProcedure 768cc1871b22dcaa3f13dc427eba7bc8, server=7c69a60bd8f6,34923,1732416908455 because future has completed 2024-11-24T02:55:23,913 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=9, resume processing ppid=8 2024-11-24T02:55:23,913 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=9, ppid=8, state=SUCCESS, hasLock=false; CloseRegionProcedure 768cc1871b22dcaa3f13dc427eba7bc8, server=7c69a60bd8f6,34923,1732416908455 in 211 msec 2024-11-24T02:55:23,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=8, resume processing ppid=7 2024-11-24T02:55:23,916 INFO [PEWorker-3 {}] procedure2.ProcedureExecutor(1521): Finished pid=8, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=768cc1871b22dcaa3f13dc427eba7bc8, UNASSIGN in 220 msec 2024-11-24T02:55:23,924 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:23,929 INFO [PEWorker-4 {}] assignment.SplitTableRegionProcedure(728): pid=7 splitting 3 storefiles, region=768cc1871b22dcaa3f13dc427eba7bc8, threads=3 2024-11-24T02:55:23,931 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242 for region: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,931 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/b762a624c8c141f6af4fc75f8c6cf423 for region: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,931 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(823): pid=7 splitting started for store file: 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7cf3c6cc30994cfbb8a71a3174766d97 for region: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,940 DEBUG [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/b762a624c8c141f6af4fc75f8c6cf423, top=true 2024-11-24T02:55:23,940 DEBUG [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(650): Will create HFileLink file for hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7cf3c6cc30994cfbb8a71a3174766d97, top=true 2024-11-24T02:55:23,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741849_1025 (size=27) 2024-11-24T02:55:23,945 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741849_1025 (size=27) 2024-11-24T02:55:23,946 INFO [StoreFileSplitter-pool-1 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97 for child: 3dae74ba38fd91625fd0144f5a825658, parent: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,946 INFO [StoreFileSplitter-pool-2 {}] regionserver.HRegionFileSystem(691): Created linkFile:hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423 for child: 3dae74ba38fd91625fd0144f5a825658, parent: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,946 DEBUG [StoreFileSplitter-pool-2 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/b762a624c8c141f6af4fc75f8c6cf423 for region: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,946 DEBUG [StoreFileSplitter-pool-1 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7cf3c6cc30994cfbb8a71a3174766d97 for region: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741850_1026 (size=27) 2024-11-24T02:55:23,958 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741850_1026 (size=27) 2024-11-24T02:55:23,958 DEBUG [StoreFileSplitter-pool-0 {}] assignment.SplitTableRegionProcedure(834): pid=7 splitting complete for store file: 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242 for region: 768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:23,961 DEBUG [PEWorker-4 {}] assignment.SplitTableRegionProcedure(802): pid=7 split storefiles for region 768cc1871b22dcaa3f13dc427eba7bc8 Daughter A: [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8] storefiles, Daughter B: [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423] storefiles. 2024-11-24T02:55:23,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741851_1027 (size=71) 2024-11-24T02:55:23,968 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741851_1027 (size=71) 2024-11-24T02:55:23,970 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:23,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741852_1028 (size=71) 2024-11-24T02:55:23,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741852_1028 (size=71) 2024-11-24T02:55:23,984 DEBUG [PEWorker-4 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:23,993 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-24T02:55:23,995 DEBUG [PEWorker-4 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/recovered.edits/130.seqid, newMaxSeqId=130, maxSeqId=-1 2024-11-24T02:55:23,997 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put 
{"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8.","families":{"info":[{"qualifier":"regioninfo","vlen":63,"tag":[],"timestamp":"1732416923996"},{"qualifier":"splitA","vlen":70,"tag":[],"timestamp":"1732416923996"},{"qualifier":"splitB","vlen":70,"tag":[],"timestamp":"1732416923996"}]},"ts":"1732416923996"} 2024-11-24T02:55:23,997 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732416923996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416923996"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732416923996"}]},"ts":"1732416923996"} 2024-11-24T02:55:23,997 DEBUG [PEWorker-4 {}] assignment.RegionStateStore(723): Put {"totalColumns":3,"row":"TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.","families":{"info":[{"qualifier":"regioninfo","vlen":70,"tag":[],"timestamp":"1732416923996"},{"qualifier":"state","vlen":6,"tag":[],"timestamp":"1732416923996"},{"qualifier":"seqnumDuringOpen","vlen":8,"tag":[],"timestamp":"1732416923996"}]},"ts":"1732416923996"} 2024-11-24T02:55:24,012 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eba1d2bd66767e0fb78fd29bbe1d40d, ASSIGN}, {pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3dae74ba38fd91625fd0144f5a825658, ASSIGN}] 2024-11-24T02:55:24,014 INFO [PEWorker-5 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3dae74ba38fd91625fd0144f5a825658, ASSIGN 2024-11-24T02:55:24,014 INFO [PEWorker-1 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eba1d2bd66767e0fb78fd29bbe1d40d, ASSIGN 2024-11-24T02:55:24,015 INFO [PEWorker-5 {}] assignment.TransitRegionStateProcedure(269): Starting pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3dae74ba38fd91625fd0144f5a825658, ASSIGN; state=SPLITTING_NEW, location=7c69a60bd8f6,34923,1732416908455; forceNewPlan=false, retain=false 2024-11-24T02:55:24,015 INFO [PEWorker-1 {}] assignment.TransitRegionStateProcedure(269): Starting pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eba1d2bd66767e0fb78fd29bbe1d40d, ASSIGN; state=SPLITTING_NEW, location=7c69a60bd8f6,34923,1732416908455; forceNewPlan=false, retain=false 2024-11-24T02:55:24,166 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=3dae74ba38fd91625fd0144f5a825658, regionState=OPENING, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:24,166 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta 
row=2eba1d2bd66767e0fb78fd29bbe1d40d, regionState=OPENING, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:24,170 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=11, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3dae74ba38fd91625fd0144f5a825658, ASSIGN because future has completed 2024-11-24T02:55:24,171 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3dae74ba38fd91625fd0144f5a825658, server=7c69a60bd8f6,34923,1732416908455}] 2024-11-24T02:55:24,172 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=10, ppid=7, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eba1d2bd66767e0fb78fd29bbe1d40d, ASSIGN because future has completed 2024-11-24T02:55:24,174 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2eba1d2bd66767e0fb78fd29bbe1d40d, server=7c69a60bd8f6,34923,1732416908455}] 2024-11-24T02:55:24,333 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:24,334 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7752): Opening region: {ENCODED => 3dae74ba38fd91625fd0144f5a825658, NAME => 'TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.', STARTKEY => 'row0062', ENDKEY => ''} 2024-11-24T02:55:24,335 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,335 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:24,335 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7794): checking encryption for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,335 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(7797): checking classloading for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,338 INFO [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,339 INFO [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files 
[minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 3dae74ba38fd91625fd0144f5a825658 columnFamilyName info 2024-11-24T02:55:24,339 DEBUG [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:24,355 DEBUG [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8->hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242-top 2024-11-24T02:55:24,360 DEBUG [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97 2024-11-24T02:55:24,364 DEBUG [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423 2024-11-24T02:55:24,364 INFO [StoreOpener-3dae74ba38fd91625fd0144f5a825658-1 {}] regionserver.HStore(327): Store=3dae74ba38fd91625fd0144f5a825658/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:24,364 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1038): replaying wal for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,365 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,366 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,367 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1048): stopping wal replay for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,367 DEBUG 
[RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1060): Cleaning up temporary data for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,368 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1093): writing seq id for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,369 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1114): Opened 3dae74ba38fd91625fd0144f5a825658; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=730712, jitterRate=-0.07085289061069489}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:55:24,369 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:24,370 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegion(1006): Region open journal for 3dae74ba38fd91625fd0144f5a825658: Running coprocessor pre-open hook at 1732416924335Writing region info on filesystem at 1732416924335Initializing all the Stores at 1732416924337 (+2 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416924337Cleaning up temporary data from old regions at 1732416924367 (+30 ms)Running coprocessor post-open hooks at 1732416924369 (+2 ms)Region opened successfully at 1732416924369 2024-11-24T02:55:24,370 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., pid=12, masterSystemTime=1732416924325 2024-11-24T02:55:24,371 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:24,371 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:24,371 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:24,372 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 
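Annotation: the region-open journal above shows daughter 3dae74ba38fd91625fd0144f5a825658 coming up with a SteppingSplitPolicy layered over IncreasingToUpperBoundRegionSplitPolicy and a jittered desiredMaxFileSize. As an illustration only (not something this test does), the hedged sketch below shows how a split policy and file-size ceiling are typically configured when a table is created; the policy class name matches the one the journal reports, the package and property values are assumptions/placeholders.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class SplitPolicyExample {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    // Cluster-wide defaults (hedged: commonly documented property names; values are placeholders).
    conf.set("hbase.regionserver.region.split.policy",
        "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy");
    conf.setLong("hbase.hregion.max.filesize", 10L * 1024 * 1024 * 1024); // 10 GB ceiling

    try (Connection connection = ConnectionFactory.createConnection(conf);
         Admin admin = connection.getAdmin()) {
      // Per-table override, mirroring the policy reported in the open journal above.
      TableDescriptorBuilder builder = TableDescriptorBuilder
          .newBuilder(TableName.valueOf("TestLogRolling-testLogRolling"))
          .setRegionSplitPolicyClassName(
              "org.apache.hadoop.hbase.regionserver.SteppingSplitPolicy")
          .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"));
      admin.createTable(builder.build());
    }
  }
}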
2024-11-24T02:55:24,372 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files) 2024-11-24T02:55:24,372 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:24,372 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8->hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242-top, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=116.0 K 2024-11-24T02:55:24,373 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=101, earliestPutTs=1732416919395 2024-11-24T02:55:24,373 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:24,373 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=12}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:24,373 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(132): Open TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 
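Annotation: at this point the split (pid=7) has left daughter 3dae74ba38fd91625fd0144f5a825658 serving a "-top" reference to the parent's 7380b522f06e4a91beb0a8b0153fb242 file plus two HFileLink files, and the server has already queued a compaction to rewrite them into a real store file (336af985745f477d92d78cef15c08698, completed further down). As a hedged client-side illustration of the same lifecycle, the sketch below lists the table's online regions, which after the split are the two daughters with start keys '' and 'row0062', and then requests a major compaction so any remaining reference files get rewritten; only the table name is taken from this log, the rest is generic Admin API usage.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.RegionInfo;
import org.apache.hadoop.hbase.util.Bytes;

public class DaughterRegionCheck {
  public static void main(String[] args) throws Exception {
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling");
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Admin admin = connection.getAdmin()) {
      // After the split the parent disappears from the online list and the two
      // daughters (start keys '' and 'row0062' in this log) take its place.
      for (RegionInfo region : admin.getRegions(table)) {
        System.out.printf("%s  start=%s  end=%s%n",
            region.getEncodedName(),
            Bytes.toStringBinary(region.getStartKey()),
            Bytes.toStringBinary(region.getEndKey()));
      }
      // A major compaction rewrites any remaining reference/HFileLink files so the
      // parent's store files can eventually be archived and cleaned up.
      admin.majorCompact(table);
    }
  }
}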
2024-11-24T02:55:24,373 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=114, earliestPutTs=1732416923594 2024-11-24T02:55:24,373 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7752): Opening region: {ENCODED => 2eba1d2bd66767e0fb78fd29bbe1d40d, NAME => 'TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.', STARTKEY => '', ENDKEY => 'row0062'} 2024-11-24T02:55:24,373 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table TestLogRolling-testLogRolling 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,373 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732416923618 2024-11-24T02:55:24,373 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(898): Instantiated TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:24,373 INFO [PEWorker-2 {}] assignment.RegionStateStore(223): pid=11 updating hbase:meta row=3dae74ba38fd91625fd0144f5a825658, regionState=OPEN, openSeqNum=131, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:24,373 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7794): checking encryption for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,373 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(7797): checking classloading for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,375 INFO [StoreOpener-2eba1d2bd66767e0fb78fd29bbe1d40d-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,375 DEBUG [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 1588230740 2024-11-24T02:55:24,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:24,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:24,375 DEBUG [MemStoreFlusher.0 {}] regionserver.FlushAllLargeStoresPolicy(69): Since none of the CFs were above the size, flushing all. 2024-11-24T02:55:24,375 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=5.15 KB heapSize=9 KB 2024-11-24T02:55:24,375 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=12, ppid=11, state=RUNNABLE, hasLock=false; OpenRegionProcedure 3dae74ba38fd91625fd0144f5a825658, server=7c69a60bd8f6,34923,1732416908455 because future has completed 2024-11-24T02:55:24,376 INFO [StoreOpener-2eba1d2bd66767e0fb78fd29bbe1d40d-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 2eba1d2bd66767e0fb78fd29bbe1d40d columnFamilyName info 2024-11-24T02:55:24,376 DEBUG [StoreOpener-2eba1d2bd66767e0fb78fd29bbe1d40d-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:24,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=12, resume processing ppid=11 2024-11-24T02:55:24,380 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=12, ppid=11, state=SUCCESS, hasLock=false; OpenRegionProcedure 3dae74ba38fd91625fd0144f5a825658, server=7c69a60bd8f6,34923,1732416908455 in 206 msec 2024-11-24T02:55:24,382 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=11, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=3dae74ba38fd91625fd0144f5a825658, ASSIGN in 368 msec 2024-11-24T02:55:24,393 DEBUG [StoreOpener-2eba1d2bd66767e0fb78fd29bbe1d40d-1 {}] regionserver.StoreEngine(278): loaded hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8->hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242-bottom 2024-11-24T02:55:24,393 INFO [StoreOpener-2eba1d2bd66767e0fb78fd29bbe1d40d-1 {}] regionserver.HStore(327): Store=2eba1d2bd66767e0fb78fd29bbe1d40d/info, memstore type=DefaultMemStore, 
storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:24,393 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1038): replaying wal for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,394 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,395 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,395 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/info/818edc442b3142c9ad905ff8098a1469 is 193, key is TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658./info:regioninfo/1732416924373/Put/seqid=0 2024-11-24T02:55:24,396 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1048): stopping wal replay for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,396 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1060): Cleaning up temporary data for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,398 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1093): writing seq id for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,399 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1114): Opened 2eba1d2bd66767e0fb78fd29bbe1d40d; next sequenceid=131; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=800478, jitterRate=0.017861396074295044}}}, FlushLargeStoresPolicy{flushSizeLowerBound=-1} 2024-11-24T02:55:24,399 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:24,399 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegion(1006): Region open journal for 2eba1d2bd66767e0fb78fd29bbe1d40d: Running coprocessor pre-open hook at 1732416924374Writing region info on filesystem at 1732416924374Initializing all the Stores at 1732416924374Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416924374Cleaning up temporary data from old regions at 1732416924396 (+22 ms)Running coprocessor post-open hooks at 1732416924399 (+3 ms)Region opened successfully at 1732416924399 2024-11-24T02:55:24,400 INFO 
[RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2236): Post open deploy tasks for TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d., pid=13, masterSystemTime=1732416924325 2024-11-24T02:55:24,400 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(403): Add compact mark for store 2eba1d2bd66767e0fb78fd29bbe1d40d:info, priority=-2147483648, current under compaction store size is 2 2024-11-24T02:55:24,400 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: Opening Region; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:24,400 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 1 store files, 0 compacting, 1 eligible, 16 blocking 2024-11-24T02:55:24,400 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1527): Keeping/Overriding Compaction request priority to -2147482648 for CF info since it belongs to recently split daughter region TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 2024-11-24T02:55:24,400 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1541): 2eba1d2bd66767e0fb78fd29bbe1d40d/info is initiating minor compaction (all files) 2024-11-24T02:55:24,401 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#68 average throughput is 35.92 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:24,401 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 2eba1d2bd66767e0fb78fd29bbe1d40d/info in TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 
2024-11-24T02:55:24,401 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8->hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242-bottom] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/.tmp, totalSize=85.3 K 2024-11-24T02:55:24,401 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/336af985745f477d92d78cef15c08698 is 1080, key is row0062/info:/1732416921557/Put/seqid=0 2024-11-24T02:55:24,401 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.Compactor(225): Compacting 7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8, keycount=38, bloomtype=ROW, size=85.3 K, encoding=NONE, compression=NONE, seqNum=100, earliestPutTs=1732416919395 2024-11-24T02:55:24,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741853_1029 (size=9882) 2024-11-24T02:55:24,405 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741853_1029 (size=9882) 2024-11-24T02:55:24,405 DEBUG [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] regionserver.HRegionServer(2266): Finished post open deploy task for TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 2024-11-24T02:55:24,406 INFO [RS_OPEN_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_REGION, pid=13}] handler.AssignRegionHandler(153): Opened TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 
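Annotation: both daughters are now open, and the RegionStateStore puts shown earlier (the parent row carrying splitA/splitB plus the two daughter rows) are what the MemStoreFlusher persists in the hbase:meta flush that follows. To inspect those rows directly, a plain scan of hbase:meta restricted to the table's row prefix is enough; the sketch below is a hedged example using only the generic client API, not anything this test itself runs.

import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Result;
import org.apache.hadoop.hbase.client.ResultScanner;
import org.apache.hadoop.hbase.client.Scan;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class MetaRowsForTable {
  public static void main(String[] args) throws Exception {
    try (Connection connection = ConnectionFactory.createConnection(HBaseConfiguration.create());
         Table meta = connection.getTable(TableName.META_TABLE_NAME)) {
      // Region rows in hbase:meta are keyed "<table>,<startKey>,<regionId>.",
      // so a prefix scan on the table name returns the parent and daughter rows.
      Scan scan = new Scan()
          .setRowPrefixFilter(Bytes.toBytes("TestLogRolling-testLogRolling,"))
          .addFamily(Bytes.toBytes("info"));
      try (ResultScanner scanner = meta.getScanner(scan)) {
        for (Result row : scanner) {
          System.out.println(Bytes.toStringBinary(row.getRow()));
        }
      }
    }
  }
}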
2024-11-24T02:55:24,406 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=4.95 KB at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/info/818edc442b3142c9ad905ff8098a1469 2024-11-24T02:55:24,407 INFO [PEWorker-1 {}] assignment.RegionStateStore(223): pid=10 updating hbase:meta row=2eba1d2bd66767e0fb78fd29bbe1d40d, regionState=OPEN, openSeqNum=131, regionLocation=7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:24,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741854_1030 (size=42984) 2024-11-24T02:55:24,408 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741854_1030 (size=42984) 2024-11-24T02:55:24,409 DEBUG [MiniHBaseClusterRegionServer-EventLoopGroup-15-2 {}] procedure2.ProcedureFutureUtil(82): Going to wake up procedure pid=13, ppid=10, state=RUNNABLE, hasLock=false; OpenRegionProcedure 2eba1d2bd66767e0fb78fd29bbe1d40d, server=7c69a60bd8f6,34923,1732416908455 because future has completed 2024-11-24T02:55:24,413 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/336af985745f477d92d78cef15c08698 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/336af985745f477d92d78cef15c08698 2024-11-24T02:55:24,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=13, resume processing ppid=10 2024-11-24T02:55:24,416 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=13, ppid=10, state=SUCCESS, hasLock=false; OpenRegionProcedure 2eba1d2bd66767e0fb78fd29bbe1d40d, server=7c69a60bd8f6,34923,1732416908455 in 240 msec 2024-11-24T02:55:24,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=10, resume processing ppid=7 2024-11-24T02:55:24,419 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=10, ppid=7, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=TestLogRolling-testLogRolling, region=2eba1d2bd66767e0fb78fd29bbe1d40d, ASSIGN in 404 msec 2024-11-24T02:55:24,419 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into 336af985745f477d92d78cef15c08698(size=42.0 K), total size for store is 42.0 K. This selection was in queue for 0sec, and took 0sec to execute. 
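Annotation: the "average throughput is 35.92 MB/second ... total limit is 50.00 MB/second" lines above come from the pressure-aware throughput controller that throttles compaction writes. The sketch below only documents the knobs usually associated with that controller; the property keys and the concrete controller class name are taken from HBase documentation rather than from this log, so treat them as assumptions, and the bounds shown are placeholders (they would normally live in hbase-site.xml on the region servers).

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;

public class CompactionThroughputSettings {
  public static void main(String[] args) {
    Configuration conf = HBaseConfiguration.create();
    // Hedged: commonly documented controller class and property names.
    conf.set("hbase.regionserver.throughput.controller",
        "org.apache.hadoop.hbase.regionserver.throttle.PressureAwareCompactionThroughputController");
    // Upper and lower bounds between which the write limit scales with compaction pressure.
    conf.setLong("hbase.hstore.compaction.throughput.higher.bound", 100L * 1024 * 1024);
    conf.setLong("hbase.hstore.compaction.throughput.lower.bound", 50L * 1024 * 1024);
    System.out.println("compaction throughput ceiling (bytes/s): "
        + conf.getLong("hbase.hstore.compaction.throughput.higher.bound", -1L));
  }
}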
2024-11-24T02:55:24,419 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:24,420 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416924371; duration=0sec 2024-11-24T02:55:24,420 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:24,420 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info 2024-11-24T02:55:24,421 INFO [PEWorker-4 {}] procedure2.ProcedureExecutor(1521): Finished pid=7, state=SUCCESS, hasLock=false; SplitTableRegionProcedure table=TestLogRolling-testLogRolling, parent=768cc1871b22dcaa3f13dc427eba7bc8, daughterA=2eba1d2bd66767e0fb78fd29bbe1d40d, daughterB=3dae74ba38fd91625fd0144f5a825658 in 739 msec 2024-11-24T02:55:24,423 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 2eba1d2bd66767e0fb78fd29bbe1d40d#info#compaction#69 average throughput is 20.87 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:24,423 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/.tmp/info/d12b39f2b2d54e219cc740f039312c51 is 1080, key is row0001/info:/1732416919395/Put/seqid=0 2024-11-24T02:55:24,429 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/ns/7f88ced938d0433295ceb9a7215258dc is 43, key is default/ns:d/1732416909260/Put/seqid=0 2024-11-24T02:55:24,429 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741855_1031 (size=70862) 2024-11-24T02:55:24,430 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741855_1031 (size=70862) 2024-11-24T02:55:24,433 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741856_1032 (size=5153) 2024-11-24T02:55:24,434 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741856_1032 (size=5153) 2024-11-24T02:55:24,434 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/ns/7f88ced938d0433295ceb9a7215258dc 2024-11-24T02:55:24,436 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/.tmp/info/d12b39f2b2d54e219cc740f039312c51 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/d12b39f2b2d54e219cc740f039312c51 2024-11-24T02:55:24,441 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 1 (all) file(s) in 2eba1d2bd66767e0fb78fd29bbe1d40d/info of 2eba1d2bd66767e0fb78fd29bbe1d40d into d12b39f2b2d54e219cc740f039312c51(size=69.2 K), total size for store is 69.2 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:24,441 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 2eba1d2bd66767e0fb78fd29bbe1d40d: 2024-11-24T02:55:24,441 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d., storeName=2eba1d2bd66767e0fb78fd29bbe1d40d/info, priority=15, startTime=1732416924400; duration=0sec 2024-11-24T02:55:24,441 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:24,441 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 2eba1d2bd66767e0fb78fd29bbe1d40d:info 2024-11-24T02:55:24,451 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/table/e8eaa027e9b64d1f8ca5bd184e06cb59 is 65, key is TestLogRolling-testLogRolling/table:state/1732416909673/Put/seqid=0 2024-11-24T02:55:24,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741857_1033 (size=5340) 2024-11-24T02:55:24,455 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741857_1033 (size=5340) 2024-11-24T02:55:24,456 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=122 B at sequenceid=17 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/table/e8eaa027e9b64d1f8ca5bd184e06cb59 2024-11-24T02:55:24,462 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/info/818edc442b3142c9ad905ff8098a1469 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/info/818edc442b3142c9ad905ff8098a1469 2024-11-24T02:55:24,468 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/info/818edc442b3142c9ad905ff8098a1469, entries=30, sequenceid=17, filesize=9.7 K 2024-11-24T02:55:24,469 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/ns/7f88ced938d0433295ceb9a7215258dc as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/ns/7f88ced938d0433295ceb9a7215258dc 2024-11-24T02:55:24,474 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/ns/7f88ced938d0433295ceb9a7215258dc, entries=2, sequenceid=17, filesize=5.0 K 2024-11-24T02:55:24,475 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/table/e8eaa027e9b64d1f8ca5bd184e06cb59 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/table/e8eaa027e9b64d1f8ca5bd184e06cb59 2024-11-24T02:55:24,480 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/table/e8eaa027e9b64d1f8ca5bd184e06cb59, entries=2, sequenceid=17, filesize=5.2 K 2024-11-24T02:55:24,481 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~5.15 KB/5269, heapSize ~8.70 KB/8912, currentSize=670 B/670 for 1588230740 in 106ms, sequenceid=17, compaction requested=false 2024-11-24T02:55:24,481 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T02:55:25,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:25,375 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:25,640 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] ipc.CallRunner(138): callId: 102 service: ClientService methodName: Mutate size: 1.2 K connection: 172.17.0.2:34530 deadline: 1732416935637, exception=org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. 
is not online on 7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:25,658 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(64): Try updating region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., hostname=7c69a60bd8f6,34923,1732416908455, seqNum=2 , the old value is region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., hostname=7c69a60bd8f6,34923,1732416908455, seqNum=2, error=org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. is not online on 7c69a60bd8f6,34923,1732416908455 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T02:55:25,659 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(72): The actual exception when updating region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., hostname=7c69a60bd8f6,34923,1732416908455, seqNum=2 is org.apache.hadoop.hbase.NotServingRegionException: org.apache.hadoop.hbase.NotServingRegionException: TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8. is not online on 7c69a60bd8f6,34923,1732416908455 at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegionByEncodedName(HRegionServer.java:3186) at org.apache.hadoop.hbase.regionserver.HRegionServer.getRegion(HRegionServer.java:3164) at org.apache.hadoop.hbase.regionserver.RSRpcServices.getRegion(RSRpcServices.java:1413) at org.apache.hadoop.hbase.regionserver.RSRpcServices.mutate(RSRpcServices.java:2943) at org.apache.hadoop.hbase.shaded.protobuf.generated.ClientProtos$ClientService$2.callBlockingMethod(ClientProtos.java:43506) at org.apache.hadoop.hbase.ipc.RpcServer.call(RpcServer.java:444) at org.apache.hadoop.hbase.ipc.CallRunner.run(CallRunner.java:124) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:102) at org.apache.hadoop.hbase.ipc.RpcHandler.run(RpcHandler.java:82) 2024-11-24T02:55:25,659 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncRegionLocatorHelper(88): Try removing region=TestLogRolling-testLogRolling,,1732416909306.768cc1871b22dcaa3f13dc427eba7bc8., hostname=7c69a60bd8f6,34923,1732416908455, seqNum=2 from cache 2024-11-24T02:55:26,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] 
at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:26,377 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:27,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:27,378 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:28,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:28,379 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:29,380 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:29,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:29,404 WARN [HBase-Metrics2-1 {}] impl.MetricsConfig(138): Cannot locate configuration: tried hadoop-metrics2-datanode.properties,hadoop-metrics2.properties 2024-11-24T02:55:29,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,405 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,406 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,407 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,434 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,435 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,436 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,440 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,441 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:29,444 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:30,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:30,381 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:31,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:31,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:32,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:32,382 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:33,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:33,383 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:34,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:34,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:35,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:35,384 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
    at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?]
    at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?]
    at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?]
    at java.lang.Thread.run(Thread.java:840) ~[?:?]
Caused by: java.io.IOException: Filesystem closed
    at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?]
    at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?]
    at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?]
    ... 11 more
2024-11-24T02:55:35,688 DEBUG [RPCClient-NioEventLoopGroup-4-6 {}] client.AsyncNonMetaRegionLocator(310): The fetched location of 'TestLogRolling-testLogRolling', row='row0097', locateType=CURRENT is [region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., hostname=7c69a60bd8f6,34923,1732416908455, seqNum=131]
2024-11-24T02:55:35,707 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658
2024-11-24T02:55:35,708 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB
2024-11-24T02:55:35,714 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/5629f47265c140e99fffa8f9bddb039f is 1080, key is row0097/info:/1732416935691/Put/seqid=0
2024-11-24T02:55:35,721 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741858_1034 (size=12516)
2024-11-24T02:55:35,722 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741858_1034 (size=12516)
2024-11-24T02:55:35,723 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=141 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/5629f47265c140e99fffa8f9bddb039f
2024-11-24T02:55:35,733 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/5629f47265c140e99fffa8f9bddb039f as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/5629f47265c140e99fffa8f9bddb039f
2024-11-24T02:55:35,751 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/5629f47265c140e99fffa8f9bddb039f, entries=7, sequenceid=141, filesize=12.2 K
2024-11-24T02:55:35,753 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=15.76 KB/16140 for 3dae74ba38fd91625fd0144f5a825658 in 46ms, sequenceid=141, compaction requested=false
2024-11-24T02:55:35,753 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658:
2024-11-24T02:55:35,755 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658
2024-11-24T02:55:35,755 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=17.86 KB heapSize=19.38 KB
2024-11-24T02:55:35,760 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/cb57b581d90144b38730a4de18dafb39 is 1080, key is row0104/info:/1732416935709/Put/seqid=0
2024-11-24T02:55:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741859_1035 (size=23316)
2024-11-24T02:55:35,777 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741859_1035 (size=23316)
2024-11-24T02:55:35,778 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=17.86 KB at sequenceid=161 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/cb57b581d90144b38730a4de18dafb39
2024-11-24T02:55:35,786 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/cb57b581d90144b38730a4de18dafb39 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/cb57b581d90144b38730a4de18dafb39
2024-11-24T02:55:35,791 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/cb57b581d90144b38730a4de18dafb39, entries=17, sequenceid=161, filesize=22.8 K
2024-11-24T02:55:35,792 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~17.86 KB/18292, heapSize ~19.36 KB/19824, currentSize=8.41 KB/8608 for 3dae74ba38fd91625fd0144f5a825658 in 37ms, sequenceid=161, compaction requested=true
2024-11-24T02:55:35,793 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658:
2024-11-24T02:55:35,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1
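The records immediately above and below trace the usual HBase write path for this region: memstore flushes write new HFiles under .tmp/, the files are committed into the info store, and the ExploringCompactionPolicy then selects the three resulting files for a minor compaction. As a hedged, illustrative sketch only (not taken from the test itself), the Java snippet below drives the same flush-then-compact cycle by hand through the public Admin API; the table name and the info column family come from the log, while the connection settings, row keys, and qualifier "q" are assumptions.

```java
// Hypothetical sketch: write a few cells, then explicitly flush the memstore and
// request a compaction of the store files, mirroring what MemStoreFlusher.0 and the
// short-compaction thread do automatically in the surrounding log records.
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.client.Put;
import org.apache.hadoop.hbase.client.Table;
import org.apache.hadoop.hbase.util.Bytes;

public class FlushAndCompactSketch {
  public static void main(String[] args) throws Exception {
    // Assumes an hbase-site.xml on the classpath pointing at a reachable cluster.
    Configuration conf = HBaseConfiguration.create();
    TableName table = TableName.valueOf("TestLogRolling-testLogRolling"); // table name from the log
    try (Connection conn = ConnectionFactory.createConnection(conf);
         Table t = conn.getTable(table);
         Admin admin = conn.getAdmin()) {
      // Write a handful of rows into the 'info' family (family from the log; keys assumed).
      for (int i = 0; i < 10; i++) {
        Put put = new Put(Bytes.toBytes(String.format("row%04d", i)));
        put.addColumn(Bytes.toBytes("info"), Bytes.toBytes("q"), Bytes.toBytes("value-" + i));
        t.put(put);
      }
      // Persist the memstore to a new HFile, then ask the region server to compact
      // the accumulated store files (a minor/system compaction, as seen in the log).
      admin.flush(table);
      admin.compact(table);
    }
  }
}
```

In the test run itself these steps are triggered automatically by write pressure and by the compaction policy; the sketch only makes the same sequence explicit for readers following the log.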
2024-11-24T02:55:35,793 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-24T02:55:35,793 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking
2024-11-24T02:55:35,794 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 78816 starting at candidate #0 after considering 1 permutations with 1 in ratio
2024-11-24T02:55:35,794 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files)
2024-11-24T02:55:35,794 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.
2024-11-24T02:55:35,794 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/336af985745f477d92d78cef15c08698, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/5629f47265c140e99fffa8f9bddb039f, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/cb57b581d90144b38730a4de18dafb39] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=77.0 K
2024-11-24T02:55:35,795 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 336af985745f477d92d78cef15c08698, keycount=35, bloomtype=ROW, size=42.0 K, encoding=NONE, compression=NONE, seqNum=127, earliestPutTs=1732416921557
2024-11-24T02:55:35,795 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 5629f47265c140e99fffa8f9bddb039f, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=141, earliestPutTs=1732416935691
2024-11-24T02:55:35,795 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting cb57b581d90144b38730a4de18dafb39, keycount=17, bloomtype=ROW, size=22.8 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732416935709
2024-11-24T02:55:35,811 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#74 average throughput is 30.27 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second
2024-11-24T02:55:35,811 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/09f3a72da5154e1da7374e45d2d8b542 is 1080, key is row0062/info:/1732416921557/Put/seqid=0
2024-11-24T02:55:35,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741860_1036 (size=69026)
2024-11-24T02:55:35,817 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741860_1036 (size=69026)
2024-11-24T02:55:35,824 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/09f3a72da5154e1da7374e45d2d8b542 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/09f3a72da5154e1da7374e45d2d8b542
2024-11-24T02:55:35,831 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into 09f3a72da5154e1da7374e45d2d8b542(size=67.4 K), total size for store is 67.4 K. This selection was in queue for 0sec, and took 0sec to execute.
2024-11-24T02:55:35,831 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658:
2024-11-24T02:55:35,831 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416935793; duration=0sec
2024-11-24T02:55:35,831 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0
2024-11-24T02:55:35,831 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info
2024-11-24T02:55:36,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta
java.lang.reflect.InvocationTargetException: null
    at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?]
    at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?]
    at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?]
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:36,385 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:37,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:37,386 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:37,778 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:37,778 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-24T02:55:37,783 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/e4574ef132934e478f1878428376facc is 1080, key is row0121/info:/1732416935758/Put/seqid=0 2024-11-24T02:55:37,797 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741861_1037 (size=14672) 2024-11-24T02:55:37,798 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741861_1037 (size=14672) 2024-11-24T02:55:37,799 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=174 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/e4574ef132934e478f1878428376facc 2024-11-24T02:55:37,805 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/e4574ef132934e478f1878428376facc as 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/e4574ef132934e478f1878428376facc 2024-11-24T02:55:37,811 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/e4574ef132934e478f1878428376facc, entries=9, sequenceid=174, filesize=14.3 K 2024-11-24T02:55:37,812 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=13.66 KB/13988 for 3dae74ba38fd91625fd0144f5a825658 in 34ms, sequenceid=174, compaction requested=false 2024-11-24T02:55:37,812 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:37,814 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:37,814 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=15.76 KB heapSize=17.13 KB 2024-11-24T02:55:37,818 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/71bc943b7c56471d88ab98ec8b6316d1 is 1080, key is row0130/info:/1732416937780/Put/seqid=0 2024-11-24T02:55:37,825 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741862_1038 (size=21156) 2024-11-24T02:55:37,826 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741862_1038 (size=21156) 2024-11-24T02:55:37,826 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=15.76 KB at sequenceid=192 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/71bc943b7c56471d88ab98ec8b6316d1 2024-11-24T02:55:37,832 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/71bc943b7c56471d88ab98ec8b6316d1 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/71bc943b7c56471d88ab98ec8b6316d1 2024-11-24T02:55:37,838 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/71bc943b7c56471d88ab98ec8b6316d1, entries=15, sequenceid=192, filesize=20.7 K 2024-11-24T02:55:37,839 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~15.76 KB/16140, heapSize ~17.11 KB/17520, currentSize=11.56 KB/11836 for 3dae74ba38fd91625fd0144f5a825658 in 25ms, sequenceid=192, compaction requested=true 2024-11-24T02:55:37,839 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:37,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:37,839 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:37,840 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:37,840 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:37,840 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T02:55:37,841 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 104854 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:37,841 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files) 2024-11-24T02:55:37,841 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:37,841 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/09f3a72da5154e1da7374e45d2d8b542, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/e4574ef132934e478f1878428376facc, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/71bc943b7c56471d88ab98ec8b6316d1] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=102.4 K 2024-11-24T02:55:37,841 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 09f3a72da5154e1da7374e45d2d8b542, keycount=59, bloomtype=ROW, size=67.4 K, encoding=NONE, compression=NONE, seqNum=161, earliestPutTs=1732416921557 2024-11-24T02:55:37,842 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting e4574ef132934e478f1878428376facc, keycount=9, bloomtype=ROW, size=14.3 K, encoding=NONE, compression=NONE, seqNum=174, earliestPutTs=1732416935758 2024-11-24T02:55:37,842 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 71bc943b7c56471d88ab98ec8b6316d1, keycount=15, bloomtype=ROW, size=20.7 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732416937780 2024-11-24T02:55:37,844 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/0f6b754f09f448b181dd6fa19a9febb8 is 1080, key is row0145/info:/1732416937815/Put/seqid=0 2024-11-24T02:55:37,851 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741863_1039 (size=17906) 2024-11-24T02:55:37,852 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741863_1039 (size=17906) 2024-11-24T02:55:37,852 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=207 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/0f6b754f09f448b181dd6fa19a9febb8 2024-11-24T02:55:37,858 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/0f6b754f09f448b181dd6fa19a9febb8 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/0f6b754f09f448b181dd6fa19a9febb8 2024-11-24T02:55:37,858 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#78 average throughput is 28.39 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:37,859 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/a76a6d0f5085486bae3031972dcd0a13 is 1080, key is row0062/info:/1732416921557/Put/seqid=0 2024-11-24T02:55:37,863 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/0f6b754f09f448b181dd6fa19a9febb8, entries=12, sequenceid=207, filesize=17.5 K 2024-11-24T02:55:37,865 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 3dae74ba38fd91625fd0144f5a825658 in 25ms, sequenceid=207, compaction requested=false 2024-11-24T02:55:37,865 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:37,866 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741864_1040 (size=95077) 2024-11-24T02:55:37,867 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741864_1040 (size=95077) 2024-11-24T02:55:37,873 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/a76a6d0f5085486bae3031972dcd0a13 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/a76a6d0f5085486bae3031972dcd0a13 2024-11-24T02:55:37,879 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into a76a6d0f5085486bae3031972dcd0a13(size=92.8 K), total size for store is 110.3 K. This selection was in queue for 0sec, and took 0sec to execute. 
2024-11-24T02:55:37,879 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:37,879 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416937839; duration=0sec 2024-11-24T02:55:37,879 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:37,879 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info 2024-11-24T02:55:38,253 DEBUG [FsDatasetAsyncDiskServiceFixer {}] hbase.HBaseTestingUtil$FsDatasetAsyncDiskServiceFixer(597): NoSuchFieldException: threadGroup; It might because your Hadoop version > 3.2.3 or 3.3.4, See HBASE-27595 for details. 2024-11-24T02:55:38,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:38,387 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:39,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:39,388 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:39,857 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:39,857 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:55:39,863 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/8a67e1c5d91143079307e6e821497bc3 is 1080, key is row0157/info:/1732416937842/Put/seqid=0 2024-11-24T02:55:39,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741865_1041 (size=12516) 2024-11-24T02:55:39,871 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741865_1041 (size=12516) 2024-11-24T02:55:39,872 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=218 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/8a67e1c5d91143079307e6e821497bc3 2024-11-24T02:55:39,878 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/8a67e1c5d91143079307e6e821497bc3 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/8a67e1c5d91143079307e6e821497bc3 2024-11-24T02:55:39,884 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/8a67e1c5d91143079307e6e821497bc3, entries=7, sequenceid=218, filesize=12.2 K 2024-11-24T02:55:39,885 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=12.61 KB/12912 for 3dae74ba38fd91625fd0144f5a825658 in 28ms, sequenceid=218, compaction requested=true 2024-11-24T02:55:39,885 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:39,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:39,886 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:39,886 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:39,887 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 125499 starting at candidate #0 after considering 1 
permutations with 1 in ratio 2024-11-24T02:55:39,887 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files) 2024-11-24T02:55:39,887 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:39,887 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:39,887 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=13.66 KB heapSize=14.88 KB 2024-11-24T02:55:39,887 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/a76a6d0f5085486bae3031972dcd0a13, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/0f6b754f09f448b181dd6fa19a9febb8, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/8a67e1c5d91143079307e6e821497bc3] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=122.6 K 2024-11-24T02:55:39,888 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting a76a6d0f5085486bae3031972dcd0a13, keycount=83, bloomtype=ROW, size=92.8 K, encoding=NONE, compression=NONE, seqNum=192, earliestPutTs=1732416921557 2024-11-24T02:55:39,888 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 0f6b754f09f448b181dd6fa19a9febb8, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=207, earliestPutTs=1732416937815 2024-11-24T02:55:39,889 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 8a67e1c5d91143079307e6e821497bc3, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732416937842 2024-11-24T02:55:39,893 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/53487211760047beb6edd61ca5e29e98 is 1080, key is row0164/info:/1732416939859/Put/seqid=0 2024-11-24T02:55:39,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741866_1042 (size=19000) 2024-11-24T02:55:39,898 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741866_1042 (size=19000) 2024-11-24T02:55:39,899 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=13.66 KB at sequenceid=234 (bloomFilter=true), 
to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/53487211760047beb6edd61ca5e29e98 2024-11-24T02:55:39,903 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#81 average throughput is 34.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:39,904 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/c68af581849f48f788c8eae910da1eb9 is 1080, key is row0062/info:/1732416921557/Put/seqid=0 2024-11-24T02:55:39,907 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/53487211760047beb6edd61ca5e29e98 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/53487211760047beb6edd61ca5e29e98 2024-11-24T02:55:39,913 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/53487211760047beb6edd61ca5e29e98, entries=13, sequenceid=234, filesize=18.6 K 2024-11-24T02:55:39,914 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~13.66 KB/13988, heapSize ~14.86 KB/15216, currentSize=10.51 KB/10760 for 3dae74ba38fd91625fd0144f5a825658 in 27ms, sequenceid=234, compaction requested=false 2024-11-24T02:55:39,914 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:39,916 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:39,916 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=12.61 KB heapSize=13.75 KB 2024-11-24T02:55:39,926 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b0c60845da6d4ba688df2aff8ca7c78f is 1080, key is row0177/info:/1732416939889/Put/seqid=0 2024-11-24T02:55:39,926 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741867_1043 (size=115665) 2024-11-24T02:55:39,927 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741867_1043 (size=115665) 2024-11-24T02:55:39,933 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/c68af581849f48f788c8eae910da1eb9 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/c68af581849f48f788c8eae910da1eb9 2024-11-24T02:55:39,934 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741868_1044 (size=17906) 2024-11-24T02:55:39,935 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741868_1044 (size=17906) 2024-11-24T02:55:39,935 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=12.61 KB at sequenceid=249 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b0c60845da6d4ba688df2aff8ca7c78f 2024-11-24T02:55:39,940 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into c68af581849f48f788c8eae910da1eb9(size=113.0 K), total size for store is 131.5 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:39,940 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:39,940 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416939885; duration=0sec 2024-11-24T02:55:39,941 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:39,941 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info 2024-11-24T02:55:39,941 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b0c60845da6d4ba688df2aff8ca7c78f as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b0c60845da6d4ba688df2aff8ca7c78f 2024-11-24T02:55:39,946 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b0c60845da6d4ba688df2aff8ca7c78f, entries=12, sequenceid=249, filesize=17.5 K 2024-11-24T02:55:39,947 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~12.61 KB/12912, heapSize ~13.73 KB/14064, currentSize=4.20 KB/4304 for 3dae74ba38fd91625fd0144f5a825658 in 31ms, sequenceid=249, compaction requested=true 2024-11-24T02:55:39,948 
DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:39,948 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:39,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:39,948 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:39,949 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 152571 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:39,949 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files) 2024-11-24T02:55:39,949 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:39,949 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/c68af581849f48f788c8eae910da1eb9, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/53487211760047beb6edd61ca5e29e98, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b0c60845da6d4ba688df2aff8ca7c78f] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=149.0 K 2024-11-24T02:55:39,950 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.Compactor(225): Compacting c68af581849f48f788c8eae910da1eb9, keycount=102, bloomtype=ROW, size=113.0 K, encoding=NONE, compression=NONE, seqNum=218, earliestPutTs=1732416921557 2024-11-24T02:55:39,950 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.Compactor(225): Compacting 53487211760047beb6edd61ca5e29e98, keycount=13, bloomtype=ROW, size=18.6 K, encoding=NONE, compression=NONE, seqNum=234, earliestPutTs=1732416939859 2024-11-24T02:55:39,951 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] compactions.Compactor(225): Compacting b0c60845da6d4ba688df2aff8ca7c78f, keycount=12, bloomtype=ROW, size=17.5 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732416939889 2024-11-24T02:55:39,963 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#83 average throughput is 43.44 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:39,963 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/2bfc95d6157a494990467168e22fa033 is 1080, key is row0062/info:/1732416921557/Put/seqid=0 2024-11-24T02:55:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741869_1045 (size=142922) 2024-11-24T02:55:39,969 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741869_1045 (size=142922) 2024-11-24T02:55:39,976 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/2bfc95d6157a494990467168e22fa033 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/2bfc95d6157a494990467168e22fa033 2024-11-24T02:55:39,983 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into 2bfc95d6157a494990467168e22fa033(size=139.6 K), total size for store is 139.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:39,983 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:39,983 INFO [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416939948; duration=0sec 2024-11-24T02:55:39,983 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:39,983 DEBUG [RS:0;7c69a60bd8f6:34923-longCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info 2024-11-24T02:55:40,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:40,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:41,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:41,389 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:41,933 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:41,934 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=7.36 KB heapSize=8.13 KB 2024-11-24T02:55:41,939 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/9028429d71894064825981c111a5a2fc is 1080, key is row0189/info:/1732416939918/Put/seqid=0 2024-11-24T02:55:41,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741870_1046 (size=12519) 2024-11-24T02:55:41,944 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741870_1046 (size=12519) 2024-11-24T02:55:41,945 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.36 KB at sequenceid=261 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/9028429d71894064825981c111a5a2fc 2024-11-24T02:55:41,951 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/9028429d71894064825981c111a5a2fc as 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/9028429d71894064825981c111a5a2fc 2024-11-24T02:55:41,957 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/9028429d71894064825981c111a5a2fc, entries=7, sequenceid=261, filesize=12.2 K 2024-11-24T02:55:41,958 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.36 KB/7532, heapSize ~8.11 KB/8304, currentSize=10.51 KB/10760 for 3dae74ba38fd91625fd0144f5a825658 in 25ms, sequenceid=261, compaction requested=false 2024-11-24T02:55:41,958 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:41,959 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:41,959 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:41,963 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/7f9f9f44a7644324a88fec26b823398b is 1080, key is row0196/info:/1732416941935/Put/seqid=0 2024-11-24T02:55:41,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741871_1047 (size=16839) 2024-11-24T02:55:41,982 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741871_1047 (size=16839) 2024-11-24T02:55:41,983 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=275 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/7f9f9f44a7644324a88fec26b823398b 2024-11-24T02:55:41,990 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/7f9f9f44a7644324a88fec26b823398b as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7f9f9f44a7644324a88fec26b823398b 2024-11-24T02:55:41,995 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7f9f9f44a7644324a88fec26b823398b, entries=11, sequenceid=275, filesize=16.4 K 2024-11-24T02:55:41,996 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=18.91 KB/19368 for 3dae74ba38fd91625fd0144f5a825658 in 37ms, sequenceid=275, compaction requested=true 2024-11-24T02:55:41,996 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:41,996 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:41,996 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:41,997 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 172280 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:41,997 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files) 2024-11-24T02:55:41,997 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:41,997 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/2bfc95d6157a494990467168e22fa033, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/9028429d71894064825981c111a5a2fc, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7f9f9f44a7644324a88fec26b823398b] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=168.2 K 2024-11-24T02:55:41,998 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 2bfc95d6157a494990467168e22fa033, keycount=127, bloomtype=ROW, size=139.6 K, encoding=NONE, compression=NONE, seqNum=249, earliestPutTs=1732416921557 2024-11-24T02:55:41,998 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 9028429d71894064825981c111a5a2fc, keycount=7, bloomtype=ROW, size=12.2 K, encoding=NONE, compression=NONE, seqNum=261, earliestPutTs=1732416939918 2024-11-24T02:55:41,998 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 7f9f9f44a7644324a88fec26b823398b, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732416941935 2024-11-24T02:55:42,011 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#86 average throughput is 49.60 MB/second, slept 0 time(s) and total slept time is 0 ms. 
0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:42,011 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/bd3155e902e54ecd82128c573dee3cfb is 1080, key is row0062/info:/1732416921557/Put/seqid=0 2024-11-24T02:55:42,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741872_1048 (size=162446) 2024-11-24T02:55:42,018 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741872_1048 (size=162446) 2024-11-24T02:55:42,024 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/bd3155e902e54ecd82128c573dee3cfb as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bd3155e902e54ecd82128c573dee3cfb 2024-11-24T02:55:42,029 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into bd3155e902e54ecd82128c573dee3cfb(size=158.6 K), total size for store is 158.6 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:42,029 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:42,030 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416941996; duration=0sec 2024-11-24T02:55:42,030 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:42,030 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info 2024-11-24T02:55:42,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:42,390 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:43,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:43,391 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:43,999 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:43,999 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=19.96 KB heapSize=21.63 KB 2024-11-24T02:55:44,005 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/d173b56382cc4609accd9b3545af1ce8 is 1080, key is row0207/info:/1732416941960/Put/seqid=0 2024-11-24T02:55:44,011 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741873_1049 (size=25491) 2024-11-24T02:55:44,012 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741873_1049 (size=25491) 2024-11-24T02:55:44,012 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=19.96 KB at sequenceid=298 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/d173b56382cc4609accd9b3545af1ce8 2024-11-24T02:55:44,018 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/d173b56382cc4609accd9b3545af1ce8 as 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/d173b56382cc4609accd9b3545af1ce8 2024-11-24T02:55:44,024 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/d173b56382cc4609accd9b3545af1ce8, entries=19, sequenceid=298, filesize=24.9 K 2024-11-24T02:55:44,025 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~19.96 KB/20444, heapSize ~21.61 KB/22128, currentSize=10.51 KB/10760 for 3dae74ba38fd91625fd0144f5a825658 in 26ms, sequenceid=298, compaction requested=false 2024-11-24T02:55:44,025 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:44,026 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:44,026 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:44,030 DEBUG [MemStoreFlusher.0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/47a2332e498341aa97b9747d5447f884 is 1080, key is row0226/info:/1732416944001/Put/seqid=0 2024-11-24T02:55:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741874_1050 (size=16839) 2024-11-24T02:55:44,035 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741874_1050 (size=16839) 2024-11-24T02:55:44,036 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=312 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/47a2332e498341aa97b9747d5447f884 2024-11-24T02:55:44,042 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/47a2332e498341aa97b9747d5447f884 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/47a2332e498341aa97b9747d5447f884 2024-11-24T02:55:44,048 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/47a2332e498341aa97b9747d5447f884, entries=11, sequenceid=312, filesize=16.4 K 2024-11-24T02:55:44,049 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=10.51 KB/10760 for 3dae74ba38fd91625fd0144f5a825658 in 23ms, sequenceid=312, compaction requested=true 2024-11-24T02:55:44,049 DEBUG [MemStoreFlusher.0 {}] 
regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:44,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(403): Add compact mark for store 3dae74ba38fd91625fd0144f5a825658:info, priority=-2147483648, current under compaction store size is 1 2024-11-24T02:55:44,049 DEBUG [MemStoreFlusher.0 {}] regionserver.CompactSplit(411): Small Compaction requested: system; Because: MemStoreFlusher.0; compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:44,049 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.SortedCompactionPolicy(75): Selecting compaction from 3 store files, 0 compacting, 3 eligible, 16 blocking 2024-11-24T02:55:44,050 DEBUG [RpcServer.default.FPBQ.Fifo.handler=2,queue=0,port=34923 {}] regionserver.HRegion(8855): Flush requested on 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:44,051 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.ExploringCompactionPolicy(116): Exploring compaction algorithm has selected 3 files of size 204776 starting at candidate #0 after considering 1 permutations with 1 in ratio 2024-11-24T02:55:44,051 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=11.56 KB heapSize=12.63 KB 2024-11-24T02:55:44,051 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1541): 3dae74ba38fd91625fd0144f5a825658/info is initiating minor compaction (all files) 2024-11-24T02:55:44,051 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2416): Starting compaction of 3dae74ba38fd91625fd0144f5a825658/info in TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:44,051 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1176): Starting compaction of [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bd3155e902e54ecd82128c573dee3cfb, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/d173b56382cc4609accd9b3545af1ce8, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/47a2332e498341aa97b9747d5447f884] into tmpdir=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp, totalSize=200.0 K 2024-11-24T02:55:44,051 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting bd3155e902e54ecd82128c573dee3cfb, keycount=145, bloomtype=ROW, size=158.6 K, encoding=NONE, compression=NONE, seqNum=275, earliestPutTs=1732416921557 2024-11-24T02:55:44,052 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting d173b56382cc4609accd9b3545af1ce8, keycount=19, bloomtype=ROW, size=24.9 K, encoding=NONE, compression=NONE, seqNum=298, earliestPutTs=1732416941960 2024-11-24T02:55:44,052 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] compactions.Compactor(225): Compacting 47a2332e498341aa97b9747d5447f884, keycount=11, bloomtype=ROW, size=16.4 K, encoding=NONE, compression=NONE, seqNum=312, earliestPutTs=1732416944001 2024-11-24T02:55:44,055 DEBUG [MemStoreFlusher.0 {}] 
hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/bf08012fb54944b582f2922884dd60c6 is 1080, key is row0237/info:/1732416944027/Put/seqid=0 2024-11-24T02:55:44,069 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] throttle.PressureAwareThroughputController(145): 3dae74ba38fd91625fd0144f5a825658#info#compaction#90 average throughput is 44.89 MB/second, slept 0 time(s) and total slept time is 0 ms. 0 active operations remaining, total limit is 50.00 MB/second 2024-11-24T02:55:44,069 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b800777e0a7646b99af13bdce1b80cff is 1080, key is row0062/info:/1732416921557/Put/seqid=0 2024-11-24T02:55:44,077 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741875_1051 (size=16839) 2024-11-24T02:55:44,078 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741875_1051 (size=16839) 2024-11-24T02:55:44,078 INFO [MemStoreFlusher.0 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=11.56 KB at sequenceid=326 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/bf08012fb54944b582f2922884dd60c6 2024-11-24T02:55:44,084 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/bf08012fb54944b582f2922884dd60c6 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bf08012fb54944b582f2922884dd60c6 2024-11-24T02:55:44,089 INFO [MemStoreFlusher.0 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bf08012fb54944b582f2922884dd60c6, entries=11, sequenceid=326, filesize=16.4 K 2024-11-24T02:55:44,090 INFO [MemStoreFlusher.0 {}] regionserver.HRegion(3140): Finished flush of dataSize ~11.56 KB/11836, heapSize ~12.61 KB/12912, currentSize=9.46 KB/9684 for 3dae74ba38fd91625fd0144f5a825658 in 39ms, sequenceid=326, compaction requested=false 2024-11-24T02:55:44,090 DEBUG [MemStoreFlusher.0 {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:44,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741876_1052 (size=194926) 2024-11-24T02:55:44,091 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741876_1052 (size=194926) 2024-11-24T02:55:44,097 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegionFileSystem(442): Committing 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b800777e0a7646b99af13bdce1b80cff as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b800777e0a7646b99af13bdce1b80cff 2024-11-24T02:55:44,103 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HStore(1337): Completed compaction of 3 (all) file(s) in 3dae74ba38fd91625fd0144f5a825658/info of 3dae74ba38fd91625fd0144f5a825658 into b800777e0a7646b99af13bdce1b80cff(size=190.4 K), total size for store is 206.8 K. This selection was in queue for 0sec, and took 0sec to execute. 2024-11-24T02:55:44,103 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.HRegion(2446): Compaction status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:44,103 INFO [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(669): Completed compaction region=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., storeName=3dae74ba38fd91625fd0144f5a825658/info, priority=13, startTime=1732416944049; duration=0sec 2024-11-24T02:55:44,103 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(701): Status compactionQueue=(longCompactions=0:shortCompactions=0), splitQueue=0 2024-11-24T02:55:44,103 DEBUG [RS:0;7c69a60bd8f6:34923-shortCompactions-0 {}] regionserver.CompactSplit$CompactionRunner(719): Remove under compaction mark for store: 3dae74ba38fd91625fd0144f5a825658:info 2024-11-24T02:55:44,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:44,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:45,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:45,392 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:46,070 INFO [Time-limited test {}] wal.AbstractTestLogRolling(285): after writing there are 0 log files 2024-11-24T02:55:46,071 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34923%2C1732416908455.1732416946070 2024-11-24T02:55:46,078 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,078 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,079 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,079 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,079 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,079 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416908970 with entries=315, filesize=309.33 KB; new WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416946070 2024-11-24T02:55:46,080 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:39783:39783),(127.0.0.1/127.0.0.1:36005:36005)] 2024-11-24T02:55:46,080 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416908970 is not closed yet, will try archiving it next time 2024-11-24T02:55:46,081 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741833_1009 (size=316763) 2024-11-24T02:55:46,082 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741833_1009 (size=316763) 2024-11-24T02:55:46,086 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 3dae74ba38fd91625fd0144f5a825658 1/1 column families, dataSize=9.46 KB heapSize=10.38 KB 2024-11-24T02:55:46,090 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b68c768953bb49388cf99669c0b737fe is 1080, key is row0248/info:/1732416944052/Put/seqid=0 2024-11-24T02:55:46,094 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741878_1054 (size=14681) 2024-11-24T02:55:46,095 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741878_1054 (size=14681) 2024-11-24T02:55:46,095 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=9.46 KB at sequenceid=339 (bloomFilter=true), 
to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b68c768953bb49388cf99669c0b737fe 2024-11-24T02:55:46,101 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/.tmp/info/b68c768953bb49388cf99669c0b737fe as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b68c768953bb49388cf99669c0b737fe 2024-11-24T02:55:46,107 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b68c768953bb49388cf99669c0b737fe, entries=9, sequenceid=339, filesize=14.3 K 2024-11-24T02:55:46,108 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~9.46 KB/9684, heapSize ~10.36 KB/10608, currentSize=0 B/0 for 3dae74ba38fd91625fd0144f5a825658 in 23ms, sequenceid=339, compaction requested=true 2024-11-24T02:55:46,108 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 3dae74ba38fd91625fd0144f5a825658: 2024-11-24T02:55:46,108 INFO [Time-limited test {}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=670 B heapSize=2.02 KB 2024-11-24T02:55:46,112 DEBUG [Time-limited test {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/info/f125a96f38b04de0b284722dd0cdefd9 is 186, key is TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d./info:regioninfo/1732416924406/Put/seqid=0 2024-11-24T02:55:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741879_1055 (size=6153) 2024-11-24T02:55:46,116 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741879_1055 (size=6153) 2024-11-24T02:55:46,117 INFO [Time-limited test {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=670 B at sequenceid=21 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/info/f125a96f38b04de0b284722dd0cdefd9 2024-11-24T02:55:46,122 DEBUG [Time-limited test {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/.tmp/info/f125a96f38b04de0b284722dd0cdefd9 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/info/f125a96f38b04de0b284722dd0cdefd9 2024-11-24T02:55:46,127 INFO [Time-limited test {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/info/f125a96f38b04de0b284722dd0cdefd9, entries=5, sequenceid=21, filesize=6.0 K 2024-11-24T02:55:46,128 INFO [Time-limited test {}] regionserver.HRegion(3140): Finished flush of dataSize ~670 B/670, heapSize ~1.25 KB/1280, currentSize=0 B/0 for 1588230740 in 20ms, 
sequenceid=21, compaction requested=false 2024-11-24T02:55:46,128 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 1588230740: 2024-11-24T02:55:46,128 DEBUG [Time-limited test {}] regionserver.HRegion(2603): Flush status journal for 2eba1d2bd66767e0fb78fd29bbe1d40d: 2024-11-24T02:55:46,129 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C34923%2C1732416908455.1732416946129 2024-11-24T02:55:46,137 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,137 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,137 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,137 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,137 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,138 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416946070 with entries=2, filesize=723 B; new WAL /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416946129 2024-11-24T02:55:46,139 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36005:36005),(127.0.0.1/127.0.0.1:39783:39783)] 2024-11-24T02:55:46,139 DEBUG [Time-limited test {}] wal.AbstractFSWAL(879): hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416946070 is not closed yet, will try archiving it next time 2024-11-24T02:55:46,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741877_1053 (size=731) 2024-11-24T02:55:46,139 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741877_1053 (size=731) 2024-11-24T02:55:46,139 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416908970 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/oldWALs/7c69a60bd8f6%2C34923%2C1732416908455.1732416908970 2024-11-24T02:55:46,140 INFO [Time-limited test {}] hbase.Waiter(181): Waiting up to [5,000] milli-secs(wait.for.ratio=[1]) 2024-11-24T02:55:46,140 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:55:46,140 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
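
The repeated Close-WAL-Writer-0 WARN blocks above come from RecoverLeaseFSUtils polling the NameNode roughly once a second to see whether the old WAL files have been closed; each attempt fails with "Filesystem closed" because the DFSClient behind that FileSystem instance has already been shut down, so the same trace keeps reappearing. Below is a minimal, hypothetical sketch of such a recover-then-poll loop against a DistributedFileSystem, with illustrative timeouts and an example path; the real utility invokes isFileClosed reflectively (hence the InvocationTargetException wrapper in the traces) and uses its own backoff schedule.

```java
import java.io.IOException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;

public class LeaseRecoverySketch {

  /**
   * Poll until the NameNode reports the file closed, re-triggering lease
   * recovery on each attempt. Loosely mirrors the retry loop behind the
   * log's RecoverLeaseFSUtils warnings; timeout and pause are illustrative.
   */
  public static boolean waitUntilClosed(DistributedFileSystem dfs, Path wal,
      long timeoutMs, long pauseMs) throws IOException, InterruptedException {
    long deadline = System.currentTimeMillis() + timeoutMs;
    while (System.currentTimeMillis() < deadline) {
      // Ask the NameNode to start (or continue) lease recovery; true means
      // the lease is already released and the file is closed.
      if (dfs.recoverLease(wal)) {
        return true;
      }
      // isFileClosed is the call that throws "Filesystem closed" in the log
      // once the DFSClient backing this FileSystem has been shut down.
      if (dfs.isFileClosed(wal)) {
        return true;
      }
      Thread.sleep(pauseMs);
    }
    return false;
  }

  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Hypothetical WAL path; in the log the files live under .../WALs/<server>/
    Path wal = new Path("hdfs://localhost:46305/user/jenkins/example-wal");
    FileSystem fs = wal.getFileSystem(conf);
    if (fs instanceof DistributedFileSystem) {
      boolean closed = waitUntilClosed((DistributedFileSystem) fs, wal, 60_000L, 1_000L);
      System.out.println("WAL closed: " + closed);
    }
  }
}
```

Once the underlying client has been closed, every attempt keeps throwing, which is why the identical stack trace recurs at one-second intervals for both the plain WAL and the .meta WAL throughout this part of the run.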
2024-11-24T02:55:46,140 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:46,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:46,140 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/WALs/7c69a60bd8f6,34923,1732416908455/7c69a60bd8f6%2C34923%2C1732416908455.1732416946070 to 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/oldWALs/7c69a60bd8f6%2C34923%2C1732416908455.1732416946070 2024-11-24T02:55:46,140 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:46,140 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 2024-11-24T02:55:46,140 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:55:46,141 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=1540438130, stopped=false 2024-11-24T02:55:46,141 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,41039,1732416908275 2024-11-24T02:55:46,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:46,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:46,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:46,178 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:46,178 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:55:46,178 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
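The call stack recorded above enters the shutdown path from AbstractTestLogRolling.tearDown via HBaseTestingUtil.shutdownMiniCluster, which is what triggers the cascade of connection closes, ZooKeeper events and region closes that follow. A minimal JUnit 4 skeleton of that start/stop lifecycle is sketched below as an assumption-based illustration of how such a test harness is typically wired, not a reproduction of the actual test class.

    import org.apache.hadoop.hbase.HBaseTestingUtil;
    import org.junit.After;
    import org.junit.Before;
    import org.junit.Test;

    public class MiniClusterLifecycleSketch {
      private final HBaseTestingUtil util = new HBaseTestingUtil();

      @Before
      public void setUp() throws Exception {
        // Starts in-process HDFS, ZooKeeper, a master and a region server.
        util.startMiniCluster();
      }

      @Test
      public void testSomething() throws Exception {
        // A real test would create tables, write data and roll WALs here.
      }

      @After
      public void tearDown() throws Exception {
        // Closes cluster connections and stops all mini-cluster daemons,
        // producing a shutdown sequence like the one recorded in this log.
        util.shutdownMiniCluster();
      }
    }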
2024-11-24T02:55:46,178 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:46,178 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:46,179 DEBUG [zk-event-processor-pool-0 {}] 
zookeeper.ZKUtil(113): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:46,179 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:46,179 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,34923,1732416908455' ***** 2024-11-24T02:55:46,179 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:55:46,179 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:55:46,179 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(3091): Received CLOSE for 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(3091): Received CLOSE for 2eba1d2bd66767e0fb78fd29bbe1d40d 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:34923. 2024-11-24T02:55:46,180 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 3dae74ba38fd91625fd0144f5a825658, disabling compactions & flushes 2024-11-24T02:55:46,180 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 
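The ZooKeeper traffic above is the shutdown broadcast: deleting /hbase/running delivers a NodeDeleted event to both the master and the region server, and ZKUtil then re-arms a watch on the now-missing znode. With the raw ZooKeeper client, "set a watch on a znode that does not yet exist" is simply exists() with a watcher, roughly as below; the quorum string and timings are placeholders taken from this log for readability.

    import org.apache.zookeeper.WatchedEvent;
    import org.apache.zookeeper.Watcher;
    import org.apache.zookeeper.ZooKeeper;

    public class RunningZNodeWatchSketch {
      public static void main(String[] args) throws Exception {
        Watcher watcher = (WatchedEvent event) ->
            // NodeDeleted on /hbase/running is how cluster shutdown is announced.
            System.out.println("event=" + event.getType() + " path=" + event.getPath());
        ;
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54630", 30000, watcher);
        // exists() registers a one-shot watch whether or not the znode exists,
        // so the next create or delete of /hbase/running fires the watcher.
        zk.exists("/hbase/running", watcher);
        Thread.sleep(10_000); // keep the session open long enough to observe an event
        zk.close();
      }
    }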
2024-11-24T02:55:46,180 DEBUG [RS:0;7c69a60bd8f6:34923 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:46,180 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:46,180 DEBUG [RS:0;7c69a60bd8f6:34923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:46,180 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. after waiting 0 ms 2024-11-24T02:55:46,180 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
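Each region close above follows the same order: a time-limited wait for the close lock, then compactions and flushes are disabled, then updates are disabled before the close marker is written. The general shape of that "time limited wait for close lock" step is just a write lock acquired with a bounded tryLock; the generic sketch below illustrates the pattern only and is not HBase's HRegion implementation.

    import java.util.concurrent.TimeUnit;
    import java.util.concurrent.locks.ReentrantReadWriteLock;

    public class TimeLimitedCloseSketch {
      private final ReentrantReadWriteLock closeLock = new ReentrantReadWriteLock();
      private volatile boolean writesDisabled;

      /** Returns true if the resource could be closed within the timeout. */
      public boolean close(long timeout, TimeUnit unit) throws InterruptedException {
        // In-flight operations hold the read lock, so the write (close) lock
        // is only granted once they drain or the timeout expires.
        if (!closeLock.writeLock().tryLock(timeout, unit)) {
          return false; // could not acquire the close lock in time
        }
        try {
          writesDisabled = true; // corresponds to "Updates disabled for region ..."
          // flush remaining in-memory state, write the close event, etc.
          return true;
        } finally {
          closeLock.writeLock().unlock();
        }
      }
    }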
2024-11-24T02:55:46,180 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:55:46,181 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1321): Waiting on 3 regions to close 2024-11-24T02:55:46,181 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1325): Online Regions={3dae74ba38fd91625fd0144f5a825658=TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658., 1588230740=hbase:meta,,1.1588230740, 2eba1d2bd66767e0fb78fd29bbe1d40d=TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.} 2024-11-24T02:55:46,181 DEBUG [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1351): Waiting on 1588230740, 2eba1d2bd66767e0fb78fd29bbe1d40d, 3dae74ba38fd91625fd0144f5a825658 2024-11-24T02:55:46,181 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:55:46,181 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:55:46,181 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:55:46,181 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:55:46,181 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:55:46,181 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8->hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242-top, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/336af985745f477d92d78cef15c08698, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/5629f47265c140e99fffa8f9bddb039f, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/09f3a72da5154e1da7374e45d2d8b542, 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/cb57b581d90144b38730a4de18dafb39, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/e4574ef132934e478f1878428376facc, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/a76a6d0f5085486bae3031972dcd0a13, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/71bc943b7c56471d88ab98ec8b6316d1, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/0f6b754f09f448b181dd6fa19a9febb8, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/c68af581849f48f788c8eae910da1eb9, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/8a67e1c5d91143079307e6e821497bc3, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/53487211760047beb6edd61ca5e29e98, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/2bfc95d6157a494990467168e22fa033, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b0c60845da6d4ba688df2aff8ca7c78f, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/9028429d71894064825981c111a5a2fc, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bd3155e902e54ecd82128c573dee3cfb, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7f9f9f44a7644324a88fec26b823398b, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/d173b56382cc4609accd9b3545af1ce8, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/47a2332e498341aa97b9747d5447f884] to archive 2024-11-24T02:55:46,182 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 
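The long "Moving the files [...] to archive" entry above shows that compacted-away store files are not deleted at close; they are moved into a mirrored path under archive/ so other readers can still resolve them. Stripped of HBase specifics, that step is a mkdirs plus rename on the Hadoop FileSystem API, as in the sketch below; the paths are shortened placeholders patterned on the ones in this log.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class ArchiveStoreFileSketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46357"), conf);

        // Source: a compacted-away HFile still sitting in the store directory.
        Path src = new Path("/user/jenkins/test-data/example/data/default/"
            + "TestLogRolling-testLogRolling/region/info/336af985745f477d92d78cef15c08698");
        // Destination mirrors the same relative layout under archive/.
        Path dst = new Path("/user/jenkins/test-data/example/archive/data/default/"
            + "TestLogRolling-testLogRolling/region/info/336af985745f477d92d78cef15c08698");

        fs.mkdirs(dst.getParent());          // create the archive directory tree
        boolean moved = fs.rename(src, dst); // a metadata-only move within HDFS
        System.out.println("archived=" + moved);
      }
    }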
2024-11-24T02:55:46,184 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:46,185 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-b762a624c8c141f6af4fc75f8c6cf423 2024-11-24T02:55:46,186 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/336af985745f477d92d78cef15c08698 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/336af985745f477d92d78cef15c08698 2024-11-24T02:55:46,187 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/hbase/meta/1588230740/recovered.edits/24.seqid, newMaxSeqId=24, maxSeqId=1 2024-11-24T02:55:46,188 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/TestLogRolling-testLogRolling=768cc1871b22dcaa3f13dc427eba7bc8-7cf3c6cc30994cfbb8a71a3174766d97 2024-11-24T02:55:46,188 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:55:46,188 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:46,188 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal 
for 1588230740: Waiting for close lock at 1732416946181Running coprocessor pre-close hooks at 1732416946181Disabling compacts and flushes for region at 1732416946181Disabling writes for close at 1732416946181Writing region close event to WAL at 1732416946184 (+3 ms)Running coprocessor post-close hooks at 1732416946188 (+4 ms)Closed at 1732416946188 2024-11-24T02:55:46,188 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:46,189 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/5629f47265c140e99fffa8f9bddb039f to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/5629f47265c140e99fffa8f9bddb039f 2024-11-24T02:55:46,190 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/09f3a72da5154e1da7374e45d2d8b542 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/09f3a72da5154e1da7374e45d2d8b542 2024-11-24T02:55:46,191 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/cb57b581d90144b38730a4de18dafb39 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/cb57b581d90144b38730a4de18dafb39 2024-11-24T02:55:46,192 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/e4574ef132934e478f1878428376facc to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/e4574ef132934e478f1878428376facc 2024-11-24T02:55:46,193 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/a76a6d0f5085486bae3031972dcd0a13 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/a76a6d0f5085486bae3031972dcd0a13 2024-11-24T02:55:46,195 DEBUG 
[StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/71bc943b7c56471d88ab98ec8b6316d1 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/71bc943b7c56471d88ab98ec8b6316d1 2024-11-24T02:55:46,196 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/0f6b754f09f448b181dd6fa19a9febb8 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/0f6b754f09f448b181dd6fa19a9febb8 2024-11-24T02:55:46,197 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/c68af581849f48f788c8eae910da1eb9 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/c68af581849f48f788c8eae910da1eb9 2024-11-24T02:55:46,198 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/8a67e1c5d91143079307e6e821497bc3 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/8a67e1c5d91143079307e6e821497bc3 2024-11-24T02:55:46,199 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/53487211760047beb6edd61ca5e29e98 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/53487211760047beb6edd61ca5e29e98 2024-11-24T02:55:46,200 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/2bfc95d6157a494990467168e22fa033 to 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/2bfc95d6157a494990467168e22fa033 2024-11-24T02:55:46,202 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b0c60845da6d4ba688df2aff8ca7c78f to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/b0c60845da6d4ba688df2aff8ca7c78f 2024-11-24T02:55:46,203 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/9028429d71894064825981c111a5a2fc to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/9028429d71894064825981c111a5a2fc 2024-11-24T02:55:46,204 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bd3155e902e54ecd82128c573dee3cfb to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/bd3155e902e54ecd82128c573dee3cfb 2024-11-24T02:55:46,205 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7f9f9f44a7644324a88fec26b823398b to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/7f9f9f44a7644324a88fec26b823398b 2024-11-24T02:55:46,206 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/d173b56382cc4609accd9b3545af1ce8 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/d173b56382cc4609accd9b3545af1ce8 2024-11-24T02:55:46,207 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/47a2332e498341aa97b9747d5447f884 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/info/47a2332e498341aa97b9747d5447f884 2024-11-24T02:55:46,208 DEBUG [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] regionserver.HRegionServer(3532): Failed to report file archival(s) to Master. This will be retried. org.apache.hadoop.hbase.ipc.StoppedRpcClientException: Call to address=7c69a60bd8f6:41039 failed on local exception: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance0(Native Method) ~[?:?] at jdk.internal.reflect.NativeConstructorAccessorImpl.newInstance(NativeConstructorAccessorImpl.java:77) ~[?:?] at jdk.internal.reflect.DelegatingConstructorAccessorImpl.newInstance(DelegatingConstructorAccessorImpl.java:45) ~[?:?] at java.lang.reflect.Constructor.newInstanceWithCaller(Constructor.java:499) ~[?:?] at java.lang.reflect.Constructor.newInstance(Constructor.java:480) ~[?:?] at org.apache.hadoop.hbase.ipc.IPCUtil.wrapException(IPCUtil.java:237) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.onCallFinished(AbstractRpcClient.java:395) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:430) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$3.run(AbstractRpcClient.java:425) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.callComplete(Call.java:117) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.Call.setException(Call.java:132) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:451) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callBlockingMethod(AbstractRpcClient.java:336) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient$BlockingRpcChannelImplementation.callBlockingMethod(AbstractRpcClient.java:606) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.shaded.protobuf.generated.RegionServerStatusProtos$RegionServerStatusService$BlockingStub.reportFileArchival(RegionServerStatusProtos.java:17350) ~[hbase-protocol-shaded-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.HRegionServer.reportFileArchivalForQuotas(HRegionServer.java:3516) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.reportArchivedFilesForQuota(HStore.java:2412) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.removeCompactedfiles(HStore.java:2347) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.closeWithoutLock(HStore.java:738) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HStore.close(HStore.java:804) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1912) ~[classes/:?] 
at org.apache.hadoop.hbase.regionserver.HRegion$2.call(HRegion.java:1909) ~[classes/:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:539) ~[?:?] at java.util.concurrent.FutureTask.run(FutureTask.java:264) ~[?:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: org.apache.hadoop.hbase.ipc.StoppedRpcClientException at org.apache.hadoop.hbase.ipc.AbstractRpcClient.getConnection(AbstractRpcClient.java:366) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.ipc.AbstractRpcClient.callMethod(AbstractRpcClient.java:448) ~[hbase-client-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] ... 16 more 2024-11-24T02:55:46,208 WARN [StoreCloser-TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [336af985745f477d92d78cef15c08698=42984, 5629f47265c140e99fffa8f9bddb039f=12516, 09f3a72da5154e1da7374e45d2d8b542=69026, cb57b581d90144b38730a4de18dafb39=23316, e4574ef132934e478f1878428376facc=14672, a76a6d0f5085486bae3031972dcd0a13=95077, 71bc943b7c56471d88ab98ec8b6316d1=21156, 0f6b754f09f448b181dd6fa19a9febb8=17906, c68af581849f48f788c8eae910da1eb9=115665, 8a67e1c5d91143079307e6e821497bc3=12516, 53487211760047beb6edd61ca5e29e98=19000, 2bfc95d6157a494990467168e22fa033=142922, b0c60845da6d4ba688df2aff8ca7c78f=17906, 9028429d71894064825981c111a5a2fc=12519, bd3155e902e54ecd82128c573dee3cfb=162446, 7f9f9f44a7644324a88fec26b823398b=16839, d173b56382cc4609accd9b3545af1ce8=25491, 47a2332e498341aa97b9747d5447f884=16839] 2024-11-24T02:55:46,212 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/3dae74ba38fd91625fd0144f5a825658/recovered.edits/342.seqid, newMaxSeqId=342, maxSeqId=130 2024-11-24T02:55:46,212 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 2024-11-24T02:55:46,213 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 3dae74ba38fd91625fd0144f5a825658: Waiting for close lock at 1732416946180Running coprocessor pre-close hooks at 1732416946180Disabling compacts and flushes for region at 1732416946180Disabling writes for close at 1732416946180Writing region close event to WAL at 1732416946208 (+28 ms)Running coprocessor post-close hooks at 1732416946212 (+4 ms)Closed at 1732416946212 2024-11-24T02:55:46,213 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,row0062,1732416923680.3dae74ba38fd91625fd0144f5a825658. 
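The StoppedRpcClientException above is expected at this point in the teardown: the region server's RPC client has already been stopped, so the quota report of archived files cannot reach the master and is only logged as "will be retried". A generic shape for that kind of best-effort report, retrying with backoff but giving up once the server is stopping, is sketched below purely as an illustration; it is not the HRegionServer code and the interface names are invented for the example.

    import java.io.IOException;
    import java.util.List;
    import java.util.concurrent.TimeUnit;

    public class BestEffortReportSketch {
      interface ArchivalReporter {
        boolean isStopped();
        void report(List<String> archivedFiles) throws IOException; // may fail once the RPC client is stopped
      }

      static boolean reportWithRetry(ArchivalReporter reporter, List<String> files, int attempts)
          throws InterruptedException {
        for (int i = 0; i < attempts && !reporter.isStopped(); i++) {
          try {
            reporter.report(files);
            return true;                            // the report reached the master
          } catch (IOException e) {
            // e.g. a stopped RPC client during shutdown: back off and retry
            TimeUnit.MILLISECONDS.sleep(100L << i); // simple exponential backoff
          }
        }
        return false; // gave up: server stopping or retries exhausted
      }
    }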
2024-11-24T02:55:46,213 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1722): Closing 2eba1d2bd66767e0fb78fd29bbe1d40d, disabling compactions & flushes 2024-11-24T02:55:46,213 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1755): Closing region TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 2024-11-24T02:55:46,213 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1776): Time limited wait for close lock on TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 2024-11-24T02:55:46,213 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1843): Acquired close lock on TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. after waiting 0 ms 2024-11-24T02:55:46,213 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1853): Updates disabled for region TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 2024-11-24T02:55:46,213 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.-1 {}] regionserver.HStore(2317): Moving the files [hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8->hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/768cc1871b22dcaa3f13dc427eba7bc8/info/7380b522f06e4a91beb0a8b0153fb242-bottom] to archive 2024-11-24T02:55:46,214 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.-1 {}] backup.HFileArchiver(360): Archiving compacted files. 2024-11-24T02:55:46,216 DEBUG [StoreCloser-TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.-1 {}] backup.HFileArchiver(596): Archived from FileableStoreFile, hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8 to hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/archive/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/info/7380b522f06e4a91beb0a8b0153fb242.768cc1871b22dcaa3f13dc427eba7bc8 2024-11-24T02:55:46,216 WARN [StoreCloser-TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d.-1 {}] regionserver.HStore(2414): Failed to report archival of files: [] 2024-11-24T02:55:46,220 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/data/default/TestLogRolling-testLogRolling/2eba1d2bd66767e0fb78fd29bbe1d40d/recovered.edits/135.seqid, newMaxSeqId=135, maxSeqId=130 2024-11-24T02:55:46,220 INFO [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1973): Closed TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 
2024-11-24T02:55:46,221 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] regionserver.HRegion(1676): Region close journal for 2eba1d2bd66767e0fb78fd29bbe1d40d: Waiting for close lock at 1732416946213Running coprocessor pre-close hooks at 1732416946213Disabling compacts and flushes for region at 1732416946213Disabling writes for close at 1732416946213Writing region close event to WAL at 1732416946216 (+3 ms)Running coprocessor post-close hooks at 1732416946220 (+4 ms)Closed at 1732416946220 2024-11-24T02:55:46,221 DEBUG [RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_REGION}] handler.CloseRegionHandler(117): Closed TestLogRolling-testLogRolling,,1732416923680.2eba1d2bd66767e0fb78fd29bbe1d40d. 2024-11-24T02:55:46,381 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,34923,1732416908455; all regions closed. 2024-11-24T02:55:46,381 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,382 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,382 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,382 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,382 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741834_1010 (size=8107) 2024-11-24T02:55:46,385 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741834_1010 (size=8107) 2024-11-24T02:55:46,389 DEBUG [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/oldWALs 2024-11-24T02:55:46,389 INFO [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34923%2C1732416908455.meta:.meta(num 1732416909168) 2024-11-24T02:55:46,390 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,390 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,390 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,390 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,391 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741880_1056 (size=780) 2024-11-24T02:55:46,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:46,393 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741880_1056 (size=780) 2024-11-24T02:55:46,393 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] 
Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:46,396 DEBUG [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/oldWALs 2024-11-24T02:55:46,396 INFO [RS:0;7c69a60bd8f6:34923 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C34923%2C1732416908455:(num 1732416946129) 2024-11-24T02:55:46,396 DEBUG [RS:0;7c69a60bd8f6:34923 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:46,396 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:55:46,396 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:55:46,396 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS] on shutdown 2024-11-24T02:55:46,396 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:55:46,396 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
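The "Failed invocation ... Filesystem closed" warnings above come from the WAL close path probing whether an old WAL file is already closed after the DFS client has been shut down, so they are teardown noise rather than data loss. Outside of shutdown, the underlying HDFS calls are recoverLease and isFileClosed on DistributedFileSystem, roughly as in the sketch below; the WAL path shown is a placeholder.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.hdfs.DistributedFileSystem;

    public class WalLeaseRecoverySketch {
      public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:46357"), conf);
        DistributedFileSystem dfs = (DistributedFileSystem) fs;

        Path wal = new Path("/user/jenkins/test-data/example/WALs/server/example.wal");
        // Ask the NameNode to start lease recovery; true means the file is
        // already closed and its full length is safe to read.
        boolean closed = dfs.recoverLease(wal);
        while (!closed) {
          Thread.sleep(1000);
          // Poll until block recovery finishes and the file is finalized.
          closed = dfs.isFileClosed(wal);
        }
        System.out.println("lease recovered for " + wal);
      }
    }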
2024-11-24T02:55:46,397 INFO [RS:0;7c69a60bd8f6:34923 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:34923 2024-11-24T02:55:46,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,34923,1732416908455 2024-11-24T02:55:46,409 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:55:46,410 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:55:46,420 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,34923,1732416908455] 2024-11-24T02:55:46,430 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,34923,1732416908455 already deleted, retry=false 2024-11-24T02:55:46,430 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,34923,1732416908455 expired; onlineServers=0 2024-11-24T02:55:46,431 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,41039,1732416908275' ***** 2024-11-24T02:55:46,431 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:55:46,431 INFO [M:0;7c69a60bd8f6:41039 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:55:46,431 INFO [M:0;7c69a60bd8f6:41039 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:55:46,431 DEBUG [M:0;7c69a60bd8f6:41039 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:55:46,431 DEBUG [M:0;7c69a60bd8f6:41039 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:55:46,431 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
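The RegionServerTracker entries above react to the disappearance of the region server's znode under /hbase/rs: because that node is ephemeral, it is removed automatically when the server's ZooKeeper session ends, and the resulting NodeDeleted/NodeChildrenChanged events are what the master treats as the server's expiration. A minimal example of registering such an ephemeral presence node with the raw ZooKeeper client follows; the quorum address and znode path are placeholders.

    import org.apache.zookeeper.CreateMode;
    import org.apache.zookeeper.ZooDefs;
    import org.apache.zookeeper.ZooKeeper;

    public class EphemeralPresenceSketch {
      public static void main(String[] args) throws Exception {
        ZooKeeper zk = new ZooKeeper("127.0.0.1:54630", 30000, event -> { });
        // EPHEMERAL: the znode is deleted automatically when this session
        // closes or expires, so watchers on /hbase/rs see the deletion
        // without any explicit delete call from the server.
        zk.create("/hbase/rs/example-server,16020,1732416908455",
            new byte[0], ZooDefs.Ids.OPEN_ACL_UNSAFE, CreateMode.EPHEMERAL);
        Thread.sleep(5_000); // presence is visible while the session stays alive
        zk.close();          // session ends -> znode deleted -> watchers notified
      }
    }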
2024-11-24T02:55:46,431 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416908801 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416908801,5,FailOnTimeoutGroup] 2024-11-24T02:55:46,431 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416908802 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416908802,5,FailOnTimeoutGroup] 2024-11-24T02:55:46,431 INFO [M:0;7c69a60bd8f6:41039 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:55:46,431 INFO [M:0;7c69a60bd8f6:41039 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:55:46,431 DEBUG [M:0;7c69a60bd8f6:41039 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:55:46,431 INFO [M:0;7c69a60bd8f6:41039 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:55:46,431 INFO [M:0;7c69a60bd8f6:41039 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:55:46,432 INFO [M:0;7c69a60bd8f6:41039 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:55:46,432 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T02:55:46,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:55:46,441 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:46,441 DEBUG [M:0;7c69a60bd8f6:41039 {}] zookeeper.ZKUtil(347): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:55:46,441 WARN [M:0;7c69a60bd8f6:41039 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:55:46,442 INFO [M:0;7c69a60bd8f6:41039 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/.lastflushedseqids 2024-11-24T02:55:46,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741881_1057 (size=228) 2024-11-24T02:55:46,447 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741881_1057 (size=228) 2024-11-24T02:55:46,448 INFO [M:0;7c69a60bd8f6:41039 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:55:46,449 INFO [M:0;7c69a60bd8f6:41039 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:55:46,449 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:55:46,449 INFO [M:0;7c69a60bd8f6:41039 {}] 
regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:46,449 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:46,449 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:55:46,449 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:46,449 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=51.43 KB heapSize=63.38 KB 2024-11-24T02:55:46,472 DEBUG [M:0;7c69a60bd8f6:41039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c63c8ea8d31d4dd092134d84946108f0 is 82, key is hbase:meta,,1/info:regioninfo/1732416909200/Put/seqid=0 2024-11-24T02:55:46,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741882_1058 (size=5672) 2024-11-24T02:55:46,476 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741882_1058 (size=5672) 2024-11-24T02:55:46,479 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c63c8ea8d31d4dd092134d84946108f0 2024-11-24T02:55:46,501 DEBUG [M:0;7c69a60bd8f6:41039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c0064792cfb548a29ae268b52bd0c5b2 is 750, key is \x00\x00\x00\x00\x00\x00\x00\x04/proc:d/1732416909679/Put/seqid=0 2024-11-24T02:55:46,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741883_1059 (size=7090) 2024-11-24T02:55:46,505 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741883_1059 (size=7090) 2024-11-24T02:55:46,508 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=50.83 KB at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c0064792cfb548a29ae268b52bd0c5b2 2024-11-24T02:55:46,513 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c0064792cfb548a29ae268b52bd0c5b2 2024-11-24T02:55:46,520 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:46,520 INFO [RS:0;7c69a60bd8f6:34923 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:55:46,520 DEBUG [Time-limited test-EventThread 
{}] zookeeper.ZKWatcher(609): regionserver:34923-0x1016ac4278c0001, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:46,520 INFO [RS:0;7c69a60bd8f6:34923 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,34923,1732416908455; zookeeper connection closed. 2024-11-24T02:55:46,520 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@1f8c4649 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@1f8c4649 2024-11-24T02:55:46,521 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T02:55:46,529 DEBUG [M:0;7c69a60bd8f6:41039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5ef4dca686c343558ed17630c946eead is 69, key is 7c69a60bd8f6,34923,1732416908455/rs:state/1732416908828/Put/seqid=0 2024-11-24T02:55:46,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741884_1060 (size=5156) 2024-11-24T02:55:46,534 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741884_1060 (size=5156) 2024-11-24T02:55:46,540 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5ef4dca686c343558ed17630c946eead 2024-11-24T02:55:46,559 DEBUG [M:0;7c69a60bd8f6:41039 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f1f6909b64514a3ea7890dc5e17e525e is 52, key is load_balancer_on/state:d/1732416909302/Put/seqid=0 2024-11-24T02:55:46,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741885_1061 (size=5056) 2024-11-24T02:55:46,563 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741885_1061 (size=5056) 2024-11-24T02:55:46,564 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=125 (bloomFilter=true), to=hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f1f6909b64514a3ea7890dc5e17e525e 2024-11-24T02:55:46,569 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/c63c8ea8d31d4dd092134d84946108f0 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c63c8ea8d31d4dd092134d84946108f0 2024-11-24T02:55:46,574 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/c63c8ea8d31d4dd092134d84946108f0, entries=8, sequenceid=125, filesize=5.5 K 2024-11-24T02:55:46,574 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/c0064792cfb548a29ae268b52bd0c5b2 as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c0064792cfb548a29ae268b52bd0c5b2 2024-11-24T02:55:46,579 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.StoreFileReader(518): Loaded Delete Family Bloom (CompoundBloomFilter) metadata for c0064792cfb548a29ae268b52bd0c5b2 2024-11-24T02:55:46,579 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/c0064792cfb548a29ae268b52bd0c5b2, entries=13, sequenceid=125, filesize=6.9 K 2024-11-24T02:55:46,580 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/5ef4dca686c343558ed17630c946eead as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5ef4dca686c343558ed17630c946eead 2024-11-24T02:55:46,584 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/5ef4dca686c343558ed17630c946eead, entries=1, sequenceid=125, filesize=5.0 K 2024-11-24T02:55:46,585 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/f1f6909b64514a3ea7890dc5e17e525e as hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f1f6909b64514a3ea7890dc5e17e525e 2024-11-24T02:55:46,590 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:46357/user/jenkins/test-data/15f19fda-d990-c09c-f111-79cfa6d2df7e/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/f1f6909b64514a3ea7890dc5e17e525e, entries=1, sequenceid=125, filesize=4.9 K 2024-11-24T02:55:46,590 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(3140): Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=125, compaction requested=false 2024-11-24T02:55:46,592 INFO [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T02:55:46,592 DEBUG [M:0;7c69a60bd8f6:41039 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416946449Disabling compacts and flushes for region at 1732416946449Disabling writes for close at 1732416946449Obtaining lock to block concurrent updates at 1732416946449Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416946449Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=52663, getHeapSize=64840, getOffHeapSize=0, getCellsCount=148 at 1732416946450 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732416946450Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416946451 (+1 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416946472 (+21 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416946472Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416946483 (+11 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416946500 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416946500Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416946513 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416946529 (+16 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416946529Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416946544 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416946558 (+14 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416946559 (+1 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@742b0140: reopening flushed file at 1732416946568 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@384e9998: reopening flushed file at 1732416946574 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@2b0caf30: reopening flushed file at 1732416946579 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4cfca315: reopening flushed file at 1732416946584 (+5 ms)Finished flush of dataSize ~51.43 KB/52663, heapSize ~63.32 KB/64840, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 141ms, sequenceid=125, compaction requested=false at 1732416946590 (+6 ms)Writing region close event to WAL at 1732416946592 (+2 ms)Closed at 1732416946592 2024-11-24T02:55:46,592 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,592 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,592 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,592 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,592 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:46,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35805 is added to blk_1073741830_1006 (size=61332) 2024-11-24T02:55:46,594 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:35353 is added to blk_1073741830_1006 (size=61332) 2024-11-24T02:55:46,595 INFO [M:0;7c69a60bd8f6:41039 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T02:55:46,595 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:55:46,595 INFO [M:0;7c69a60bd8f6:41039 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:41039 2024-11-24T02:55:46,595 INFO [M:0;7c69a60bd8f6:41039 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:55:46,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:46,699 INFO [M:0;7c69a60bd8f6:41039 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:55:46,699 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:41039-0x1016ac4278c0000, quorum=127.0.0.1:54630, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:46,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@47bcda8c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:46,703 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@29765213{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:46,703 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:46,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@19fe8881{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:46,703 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@49490ce4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:46,705 WARN [BP-839236611-172.17.0.2-1732416906061 heartbeating to localhost/127.0.0.1:46357 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:55:46,705 WARN [BP-839236611-172.17.0.2-1732416906061 heartbeating to localhost/127.0.0.1:46357 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-839236611-172.17.0.2-1732416906061 (Datanode Uuid 85dbf609-e02d-45aa-b358-602e87b9aad6) service to localhost/127.0.0.1:46357 2024-11-24T02:55:46,705 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:55:46,705 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:55:46,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data3/current/BP-839236611-172.17.0.2-1732416906061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:46,706 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data4/current/BP-839236611-172.17.0.2-1732416906061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:46,707 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:55:46,713 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@23e1642c{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:46,714 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@767f877d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:46,714 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:46,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@18d1ee92{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:46,714 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@354bdaa4{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:46,715 WARN [BP-839236611-172.17.0.2-1732416906061 heartbeating to localhost/127.0.0.1:46357 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:55:46,715 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:55:46,715 WARN [BP-839236611-172.17.0.2-1732416906061 heartbeating to localhost/127.0.0.1:46357 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-839236611-172.17.0.2-1732416906061 (Datanode Uuid cd06f77c-4ff5-4b2d-b0b3-a4d5ee5fbe1a) service to localhost/127.0.0.1:46357 2024-11-24T02:55:46,715 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:55:46,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data1/current/BP-839236611-172.17.0.2-1732416906061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:46,716 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/cluster_3b7d7fc4-2995-ef1b-983b-6b0c71171b29/data/data2/current/BP-839236611-172.17.0.2-1732416906061 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:46,716 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:55:46,722 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@7b29c022{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:55:46,722 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@4e36d39c{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:46,722 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:46,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ce0132a{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:46,723 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ecf816b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:46,729 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:55:46,771 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:55:46,782 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRolling Thread=230 (was 206) Potentially hanging thread: nioEventLoopGroup-40-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46357 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46357 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-40-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) 
app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-39-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46357 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-40-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) 
java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46357 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-41-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:46357 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:46357 from 
jenkins.hfs.6 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-38-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:46357 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-38-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-14-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.6@localhost:46357 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-41-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-15-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=517 (was 483) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 151) - SystemLoadAverage LEAK? 
-, ProcessCount=11 (was 11), AvailableMemoryMB=9504 (was 9993) 2024-11-24T02:55:46,791 INFO [Time-limited test {}] hbase.ResourceChecker(147): before: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=230, OpenFileDescriptor=517, MaxFileDescriptor=1048576, SystemLoadAverage=164, ProcessCount=11, AvailableMemoryMB=9504 2024-11-24T02:55:46,791 INFO [Time-limited test {}] hbase.HBaseTestingUtil(805): Starting up minicluster with option: StartMiniClusterOption{numMasters=1, masterClass=null, numRegionServers=1, rsPorts=, rsClass=null, numDataNodes=2, dataNodeHosts=null, numZkServers=1, createRootDir=false, createWALDir=false} 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.log.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.log.dir so I do NOT create it in target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(401): System.getProperty("hadoop.tmp.dir") already set to: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/0851571f-bf69-bdc2-55d9-0c40f1b88e77/hadoop.tmp.dir so I do NOT create it in target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseZKTestingUtil(84): Created new mini-cluster data directory: /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c, deleteOnExit=true 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(818): STARTING DFS 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting test.cache.data to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/test.cache.data in system properties and HBase conf 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.tmp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.tmp.dir in system properties and HBase conf 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting hadoop.log.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir in system properties and HBase conf 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.local.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/mapreduce.cluster.local.dir in system properties and HBase conf 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting mapreduce.cluster.temp.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/mapreduce.cluster.temp.dir in system properties and HBase conf 2024-11-24T02:55:46,792 INFO [Time-limited test {}] hbase.HBaseTestingUtil(738): read short circuit is OFF 2024-11-24T02:55:46,792 DEBUG [Time-limited test {}] fs.HFileSystem(310): The file system is not a DistributedFileSystem. 
Skipping on block location reordering 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-labels.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.node-labels.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.node-attribute.fs-store.root-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.node-attribute.fs-store.root-dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.log-dirs to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.nodemanager.log-dirs in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.active-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.timeline-service.entity-group-fs-store.active-dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.timeline-service.entity-group-fs-store.done-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.timeline-service.entity-group-fs-store.done-dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting yarn.nodemanager.remote-app-log-dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/yarn.nodemanager.remote-app-log-dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.datanode.shared.file.descriptor.paths to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/dfs.datanode.shared.file.descriptor.paths in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting nfs.dump.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/nfs.dump.dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting java.io.tmpdir to 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/java.io.tmpdir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.journalnode.edits.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/dfs.journalnode.edits.dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting dfs.provided.aliasmap.inmemory.leveldb.dir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/dfs.provided.aliasmap.inmemory.leveldb.dir in system properties and HBase conf 2024-11-24T02:55:46,793 INFO [Time-limited test {}] hbase.HBaseTestingUtil(751): Setting fs.s3a.committer.staging.tmp.path to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/fs.s3a.committer.staging.tmp.path in system properties and HBase conf 2024-11-24T02:55:46,806 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:55:46,843 INFO [regionserver/7c69a60bd8f6:0.leaseChecker {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:55:47,164 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:47,167 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:55:47,168 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:55:47,169 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:55:47,169 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:55:47,169 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:47,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2482618b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:55:47,170 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@61266769{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:55:47,276 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@1e56b299{hdfs,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/java.io.tmpdir/jetty-localhost-41087-hadoop-hdfs-3_4_1-tests_jar-_-any-8459845655712137034/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:55:47,276 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@223a801d{HTTP/1.1, (http/1.1)}{localhost:41087} 2024-11-24T02:55:47,277 INFO [Time-limited test {}] server.Server(415): Started @291018ms 2024-11-24T02:55:47,290 WARN [Time-limited test {}] blockmanagement.DatanodeManager(468): The given interval for marking stale datanode = 30000, which is larger than heartbeat expire interval 20000. 2024-11-24T02:55:47,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] 
at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:47,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:47,576 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:47,579 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:55:47,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:55:47,580 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:55:47,580 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 660000ms 2024-11-24T02:55:47,581 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@1f1185ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:55:47,581 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@5ec9da7e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:55:47,701 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@4bb82bb7{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/java.io.tmpdir/jetty-localhost-35573-hadoop-hdfs-3_4_1-tests_jar-_-any-10168279265690740076/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:47,701 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@6a35d82b{HTTP/1.1, (http/1.1)}{localhost:35573} 2024-11-24T02:55:47,701 INFO [Time-limited test {}] server.Server(415): Started @291442ms 2024-11-24T02:55:47,702 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:55:47,754 WARN [Time-limited test {}] server.AuthenticationFilter(240): Unable to initialize FileSignerSecretProvider, falling back to use random secrets. 
Reason: Could not read signature secret file: /home/jenkins/hadoop-http-auth-signature-secret 2024-11-24T02:55:47,757 INFO [Time-limited test {}] server.Server(375): jetty-9.4.53.v20231009; built: 2023-10-09T12:29:09.265Z; git: 27bde00a0b95a1d5bbee0eae7984f891d2d0f8c9; jvm 17.0.11+9 2024-11-24T02:55:47,761 INFO [Time-limited test {}] session.DefaultSessionIdManager(334): DefaultSessionIdManager workerName=node0 2024-11-24T02:55:47,761 INFO [Time-limited test {}] session.DefaultSessionIdManager(339): No SessionScavenger set, using defaults 2024-11-24T02:55:47,761 INFO [Time-limited test {}] session.HouseKeeper(132): node0 Scavenging every 600000ms 2024-11-24T02:55:47,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@2f95849a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir/,AVAILABLE} 2024-11-24T02:55:47,761 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.s.ServletContextHandler@20dd8a9c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,AVAILABLE} 2024-11-24T02:55:47,862 INFO [Time-limited test {}] handler.ContextHandler(921): Started o.e.j.w.WebAppContext@14628127{datanode,/,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/java.io.tmpdir/jetty-localhost-37445-hadoop-hdfs-3_4_1-tests_jar-_-any-15998750539260769225/webapp/,AVAILABLE}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:47,863 INFO [Time-limited test {}] server.AbstractConnector(333): Started ServerConnector@5982a43f{HTTP/1.1, (http/1.1)}{localhost:37445} 2024-11-24T02:55:47,863 INFO [Time-limited test {}] server.Server(415): Started @291604ms 2024-11-24T02:55:47,864 WARN [Time-limited test {}] web.RestCsrfPreventionFilterHandler(75): Got null for restCsrfPreventionFilter - will not do any filtering. 2024-11-24T02:55:48,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] 
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:48,394 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 
11 more 2024-11-24T02:55:48,957 WARN [Thread-2488 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data2/current/BP-1833539678-172.17.0.2-1732416946809/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:48,957 WARN [Thread-2487 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data1/current/BP-1833539678-172.17.0.2-1732416946809/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:48,983 WARN [Thread-2451 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:55:48,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x262bf01077c0adf8 with lease ID 0xb5d9a2dbe7002823: Processing first storage report for DS-54817a07-91e5-4a1f-b6a1-b6ff60fd0ad1 from datanode DatanodeRegistration(127.0.0.1:36785, datanodeUuid=3589e288-70c8-49fa-baa9-6675a4dc530f, infoPort=45813, infoSecurePort=0, ipcPort=41753, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809) 2024-11-24T02:55:48,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x262bf01077c0adf8 with lease ID 0xb5d9a2dbe7002823: from storage DS-54817a07-91e5-4a1f-b6a1-b6ff60fd0ad1 node DatanodeRegistration(127.0.0.1:36785, datanodeUuid=3589e288-70c8-49fa-baa9-6675a4dc530f, infoPort=45813, infoSecurePort=0, ipcPort=41753, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809), blocks: 0, hasStaleStorage: true, processing time: 1 msecs, invalidatedBlocks: 0 2024-11-24T02:55:48,985 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x262bf01077c0adf8 with lease ID 0xb5d9a2dbe7002823: Processing first storage report for DS-591a2ba3-97d6-409d-8f5d-38cd8da5484d from datanode DatanodeRegistration(127.0.0.1:36785, datanodeUuid=3589e288-70c8-49fa-baa9-6675a4dc530f, infoPort=45813, infoSecurePort=0, ipcPort=41753, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809) 2024-11-24T02:55:48,985 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x262bf01077c0adf8 with lease ID 0xb5d9a2dbe7002823: from storage DS-591a2ba3-97d6-409d-8f5d-38cd8da5484d node DatanodeRegistration(127.0.0.1:36785, datanodeUuid=3589e288-70c8-49fa-baa9-6675a4dc530f, infoPort=45813, infoSecurePort=0, ipcPort=41753, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:49,170 WARN [Thread-2498 {}] impl.BlockPoolSlice(347): dfsUsed file missing in /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data3/current/BP-1833539678-172.17.0.2-1732416946809/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:49,170 WARN [Thread-2499 {}] impl.BlockPoolSlice(347): dfsUsed file missing in 
/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data4/current/BP-1833539678-172.17.0.2-1732416946809/current, will proceed with Du for space computation calculation, 2024-11-24T02:55:49,197 WARN [Thread-2474 {}] datanode.DirectoryScanner(302): dfs.datanode.directoryscan.throttle.limit.ms.per.sec set to value above 1000 ms/sec. Assuming default value of -1 2024-11-24T02:55:49,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a75666685be02fe with lease ID 0xb5d9a2dbe7002824: Processing first storage report for DS-9997ce28-081d-48fe-99b3-3fb2869a0725 from datanode DatanodeRegistration(127.0.0.1:40949, datanodeUuid=5635e1f5-edd9-4d69-89b4-4dd458560d1b, infoPort=36739, infoSecurePort=0, ipcPort=33647, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809) 2024-11-24T02:55:49,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a75666685be02fe with lease ID 0xb5d9a2dbe7002824: from storage DS-9997ce28-081d-48fe-99b3-3fb2869a0725 node DatanodeRegistration(127.0.0.1:40949, datanodeUuid=5635e1f5-edd9-4d69-89b4-4dd458560d1b, infoPort=36739, infoSecurePort=0, ipcPort=33647, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809), blocks: 0, hasStaleStorage: true, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:49,199 INFO [Block report processor {}] blockmanagement.BlockManager(2940): BLOCK* processReport 0x8a75666685be02fe with lease ID 0xb5d9a2dbe7002824: Processing first storage report for DS-8d2f4069-80f8-4dd6-8ef7-67b7fe1aedb4 from datanode DatanodeRegistration(127.0.0.1:40949, datanodeUuid=5635e1f5-edd9-4d69-89b4-4dd458560d1b, infoPort=36739, infoSecurePort=0, ipcPort=33647, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809) 2024-11-24T02:55:49,199 INFO [Block report processor {}] blockmanagement.BlockManager(2972): BLOCK* processReport 0x8a75666685be02fe with lease ID 0xb5d9a2dbe7002824: from storage DS-8d2f4069-80f8-4dd6-8ef7-67b7fe1aedb4 node DatanodeRegistration(127.0.0.1:40949, datanodeUuid=5635e1f5-edd9-4d69-89b4-4dd458560d1b, infoPort=36739, infoSecurePort=0, ipcPort=33647, storageInfo=lv=-57;cid=testClusterID;nsid=515136994;c=1732416946809), blocks: 0, hasStaleStorage: false, processing time: 0 msecs, invalidatedBlocks: 0 2024-11-24T02:55:49,293 DEBUG [Time-limited test {}] hbase.HBaseTestingUtil(631): Setting hbase.rootdir to /home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b 2024-11-24T02:55:49,297 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(261): Started connectionTimeout=30000, dir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/zookeeper_0, clientPort=62587, secureClientPort=-1, dataDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/zookeeper_0/version-2, dataDirSize=457 dataLogDir=/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/zookeeper_0/version-2, dataLogSize=457 tickTime=2000, maxClientCnxns=300, minSessionTimeout=4000, 
maxSessionTimeout=40000, clientPortListenBacklog=-1, serverId=0 2024-11-24T02:55:49,298 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(286): Started MiniZooKeeperCluster and ran 'stat' on client port=62587 2024-11-24T02:55:49,298 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,300 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:55:49,311 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741825_1001 (size=7) 2024-11-24T02:55:49,312 INFO [Time-limited test {}] util.FSUtils(489): Created version file at hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f with version=8 2024-11-24T02:55:49,312 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1141): The hbase.fs.tmp.dir is set to hdfs://localhost:44189/user/jenkins/test-data/561be513-b7cf-e88d-7d10-7fbf22cc797f/hbase-staging 2024-11-24T02:55:49,315 INFO [Time-limited test {}] client.ConnectionUtils(128): master/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.MasterService, hbase.pb.RegionServerStatusService, hbase.pb.LockService, hbase.pb.HbckService, hbase.pb.ClientMetaService, hbase.pb.AdminService 2024-11-24T02:55:49,315 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:55:49,316 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:39871 2024-11-24T02:55:49,317 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=master:39871 connecting to ZooKeeper ensemble=127.0.0.1:62587 2024-11-24T02:55:49,372 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:398710x0, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=None, 
state=SyncConnected, path=null 2024-11-24T02:55:49,373 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): master:39871-0x1016ac4c7db0000 connected 2024-11-24T02:55:49,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:49,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:49,452 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,454 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,456 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:49,456 INFO [Time-limited test {}] master.HMaster(525): hbase.rootdir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f, hbase.cluster.distributed=false 2024-11-24T02:55:49,458 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:55:49,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=39871 2024-11-24T02:55:49,459 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=39871 2024-11-24T02:55:49,460 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=39871 2024-11-24T02:55:49,460 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=39871 2024-11-24T02:55:49,460 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with 
threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=39871 2024-11-24T02:55:49,482 INFO [Time-limited test {}] client.ConnectionUtils(128): regionserver/7c69a60bd8f6:0 server-side Connection retries=45 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated default.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated priority.RWQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=2, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.RWQueueRpcExecutor(113): priority.RWQ.Fifo writeQueues=1 writeHandlers=1 readQueues=1 readHandlers=2 scanQueues=0 scanHandlers=0 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated replication.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=3 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.RpcExecutor(188): Instantiated metaPriority.FPBQ.Fifo with queueClass=class java.util.concurrent.LinkedBlockingQueue; numCallQueues=1, maxQueueLength=30, handlerCount=1 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.RpcServerFactory(64): Creating org.apache.hadoop.hbase.ipc.NettyRpcServer hosting hbase.pb.ClientService, hbase.pb.AdminService, hbase.pb.ClientMetaService, hbase.pb.BootstrapNodeService 2024-11-24T02:55:49,482 INFO [Time-limited test {}] ipc.NettyRpcServer(309): Using org.apache.hbase.thirdparty.io.netty.buffer.PooledByteBufAllocator for buffer allocation 2024-11-24T02:55:49,483 INFO [Time-limited test {}] ipc.NettyRpcServer(191): Bind to /172.17.0.2:35109 2024-11-24T02:55:49,484 INFO [Time-limited test {}] zookeeper.RecoverableZooKeeper(137): Process identifier=regionserver:35109 connecting to ZooKeeper ensemble=127.0.0.1:62587 2024-11-24T02:55:49,485 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,487 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,493 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:351090x0, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=None, state=SyncConnected, path=null 2024-11-24T02:55:49,494 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:351090x0, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:49,494 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKWatcher(635): regionserver:35109-0x1016ac4c7db0001 connected 2024-11-24T02:55:49,494 INFO [Time-limited test {}] hfile.BlockCacheFactory(123): Allocating BlockCache size=880 MB, blockSize=64 KB 2024-11-24T02:55:49,494 DEBUG [Time-limited test {}] mob.MobFileCache(124): MobFileCache enabled with cacheSize=1000, evictPeriods=3600sec, evictRemainRatio=0.5 2024-11-24T02:55:49,495 DEBUG [Time-limited test {}] zookeeper.ZKUtil(113): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/master 2024-11-24T02:55:49,496 DEBUG [Time-limited test {}] 
zookeeper.ZKUtil(113): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/acl 2024-11-24T02:55:49,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=default.FPBQ.Fifo, numCallQueues=1, port=35109 2024-11-24T02:55:49,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=priority.RWQ.Fifo.write, numCallQueues=1, port=35109 2024-11-24T02:55:49,496 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=priority.RWQ.Fifo.read, numCallQueues=1, port=35109 2024-11-24T02:55:49,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=3 with threadPrefix=replication.FPBQ.Fifo, numCallQueues=1, port=35109 2024-11-24T02:55:49,497 DEBUG [Time-limited test {}] ipc.RpcExecutor(290): Started handlerCount=1 with threadPrefix=metaPriority.FPBQ.Fifo, numCallQueues=1, port=35109 2024-11-24T02:55:49,512 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:M:0;7c69a60bd8f6:39871 2024-11-24T02:55:49,512 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(2510): Adding backup master ZNode /hbase/backup-masters/7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:49,525 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:49,525 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on existing znode=/hbase/backup-masters/7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/master 2024-11-24T02:55:49,536 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,536 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(111): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on existing znode=/hbase/master 2024-11-24T02:55:49,537 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(245): Deleting ZNode for /hbase/backup-masters/7c69a60bd8f6,39871,1732416949314 from backup master directory 2024-11-24T02:55:49,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, 
path=/hbase/backup-masters/7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:49,546 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/backup-masters 2024-11-24T02:55:49,546 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:55:49,546 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ActiveMasterManager(255): Registered as active master=7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,550 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(620): Create cluster ID file [hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/hbase.id] with ID: 4ca5f530-1e74-4edb-a4ed-46e24600ff91 2024-11-24T02:55:49,550 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(625): Write the cluster ID file to a temporary location: hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/.tmp/hbase.id 2024-11-24T02:55:49,556 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:55:49,557 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741826_1002 (size=42) 2024-11-24T02:55:49,557 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSUtils(634): Move the temporary cluster ID file to its target location [hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/.tmp/hbase.id]:[hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/hbase.id] 2024-11-24T02:55:49,569 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:49,569 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(270): Fetching table descriptors from the filesystem. 2024-11-24T02:55:49,570 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] util.FSTableDescriptors(299): Fetched table descriptors(size=0) cost 1ms. 
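[editor's note] The FSUtils messages just above report the cluster ID being written to a temporary file under .tmp and then moved to hdfs://localhost:36367/.../hbase.id. As orientation only, here is a minimal, hypothetical sketch of that write-to-temporary-then-rename pattern using the plain Hadoop FileSystem API; the paths are placeholders (not the test's actual directories), the ID string is the one reported above, and the on-disk format is simplified to plain text for illustration, not HBase's real implementation:

import java.nio.charset.StandardCharsets;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public final class ClusterIdPublishSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    FileSystem fs = FileSystem.get(conf);

    // Placeholder locations; the real test writes under its own hbase.rootdir.
    Path tmp = new Path("/hbase-rootdir/.tmp/hbase.id");
    Path target = new Path("/hbase-rootdir/hbase.id");

    // 1) Write the ID to a temporary file first (overwrite if a stale copy exists).
    try (FSDataOutputStream out = fs.create(tmp, true)) {
      out.write("4ca5f530-1e74-4edb-a4ed-46e24600ff91".getBytes(StandardCharsets.UTF_8));
    }

    // 2) Rename into the final location, so readers only ever observe a complete file.
    if (!fs.rename(tmp, target)) {
      throw new java.io.IOException("rename " + tmp + " -> " + target + " failed");
    }
  }
}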
2024-11-24T02:55:49,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,578 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,585 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:55:49,586 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741827_1003 (size=196) 2024-11-24T02:55:49,587 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(370): Create or load local region for table 'master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} 2024-11-24T02:55:49,588 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(132): Injected flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000 2024-11-24T02:55:49,588 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:49,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:55:49,595 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741828_1004 (size=1189) 2024-11-24T02:55:49,596 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7590): Creating {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, tableDescriptor='master:store', {TABLE_ATTRIBUTES => {METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', 
DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, under table dir hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store 2024-11-24T02:55:49,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:55:49,601 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741829_1005 (size=34) 2024-11-24T02:55:49,602 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:49,602 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1722): Closing 1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:55:49,602 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:49,602 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:49,602 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:55:49,602 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:49,602 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
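[editor's note] The descriptor dumped above for the master-local 'master:store' region lists four column families with explicit attributes (info, proc, rs, state). For orientation only, a hypothetical sketch of how a family with the same attributes as 'info' (VERSIONS=3, IN_MEMORY, 8 KB blocks, ROW_INDEX_V1 encoding, ROWCOL bloom filter) could be declared with the public HBase client descriptor builders; the master store region itself is created internally by the master, not through this API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptor;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;
import org.apache.hadoop.hbase.io.encoding.DataBlockEncoding;
import org.apache.hadoop.hbase.regionserver.BloomType;
import org.apache.hadoop.hbase.util.Bytes;

public final class MasterStoreDescriptorSketch {
  public static TableDescriptor build() {
    // Mirrors the 'info' family attributes printed in the log above.
    ColumnFamilyDescriptor info = ColumnFamilyDescriptorBuilder
        .newBuilder(Bytes.toBytes("info"))
        .setMaxVersions(3)
        .setInMemory(true)
        .setBlocksize(8 * 1024)
        .setDataBlockEncoding(DataBlockEncoding.ROW_INDEX_V1)
        .setBloomFilterType(BloomType.ROWCOL)
        .build();

    // 'proc', 'rs' and 'state' use the defaults shown in the log
    // (1 version, ROW bloom filter, 64 KB blocks); one is enough to illustrate.
    ColumnFamilyDescriptor proc = ColumnFamilyDescriptorBuilder.of("proc");

    return TableDescriptorBuilder
        .newBuilder(TableName.valueOf("master", "store"))
        .setColumnFamily(info)
        .setColumnFamily(proc)
        .build();
  }
}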
2024-11-24T02:55:49,602 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416949602Disabling compacts and flushes for region at 1732416949602Disabling writes for close at 1732416949602Writing region close event to WAL at 1732416949602Closed at 1732416949602 2024-11-24T02:55:49,603 WARN [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(249): failed to clean up initializing flag: hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/.initializing 2024-11-24T02:55:49,603 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegion(219): WALDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/WALs/7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,605 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C39871%2C1732416949314, suffix=, logDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/WALs/7c69a60bd8f6,39871,1732416949314, archiveDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/oldWALs, maxLogs=10 2024-11-24T02:55:49,605 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C39871%2C1732416949314.1732416949605 2024-11-24T02:55:49,609 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/WALs/7c69a60bd8f6,39871,1732416949314/7c69a60bd8f6%2C39871%2C1732416949314.1732416949605 2024-11-24T02:55:49,610 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36739:36739),(127.0.0.1/127.0.0.1:45813:45813)] 2024-11-24T02:55:49,611 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7752): Opening region: {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:55:49,611 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(898): Instantiated master:store,,1.1595e783b53d99cd5eef43b6debb2682.; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:49,611 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7794): checking encryption for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,611 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(7797): checking classloading for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,612 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,613 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 
5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName info 2024-11-24T02:55:49,613 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:49,614 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family proc of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName proc 2024-11-24T02:55:49,615 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/proc, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:49,615 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rs of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,616 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 
9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName rs 2024-11-24T02:55:49,616 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/rs, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:49,617 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family state of region 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1595e783b53d99cd5eef43b6debb2682 columnFamilyName state 2024-11-24T02:55:49,618 DEBUG [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,618 INFO [StoreOpener-1595e783b53d99cd5eef43b6debb2682-1 {}] regionserver.HStore(327): Store=1595e783b53d99cd5eef43b6debb2682/state, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=NONE, compression=NONE 2024-11-24T02:55:49,618 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1038): replaying wal for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,619 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,619 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,621 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1048): stopping wal replay for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,621 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster 
{}] regionserver.HRegion(1060): Cleaning up temporary data for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,621 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table master:store descriptor;using region.getMemStoreFlushHeapSize/# of families (32.0 M)) instead. 2024-11-24T02:55:49,622 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1093): writing seq id for 1595e783b53d99cd5eef43b6debb2682 2024-11-24T02:55:49,625 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:55:49,625 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1114): Opened 1595e783b53d99cd5eef43b6debb2682; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=268435456, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=769960, jitterRate=-0.020945683121681213}}}, FlushLargeStoresPolicy{flushSizeLowerBound=33554432} 2024-11-24T02:55:49,626 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] regionserver.HRegion(1006): Region open journal for 1595e783b53d99cd5eef43b6debb2682: Writing region info on filesystem at 1732416949611Initializing all the Stores at 1732416949612 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416949612Instantiating store for column family {NAME => 'proc', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416949612Instantiating store for column family {NAME => 'rs', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416949612Instantiating store for column family {NAME => 'state', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '1', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'NONE', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROW', IN_MEMORY => 'false', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416949612Cleaning up temporary data from old regions at 1732416949621 (+9 ms)Region opened successfully at 1732416949626 (+5 ms) 2024-11-24T02:55:49,626 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.MasterRegionFlusherAndCompactor(122): Constructor flushSize=134217728, flushPerChanges=1000000, flushIntervalMs=900000, compactMin=4 2024-11-24T02:55:49,629 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@11db3bb6, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, 
readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:55:49,630 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(912): No meta location available on zookeeper, skip migrating... 2024-11-24T02:55:49,630 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(104): Starting the Region Procedure Store, number threads=5 2024-11-24T02:55:49,630 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(626): Starting 5 core workers (bigger of cpus/4 or 16) with max (burst) worker count=50 2024-11-24T02:55:49,631 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] region.RegionProcedureStore(255): Starting Region Procedure Store lease recovery... 2024-11-24T02:55:49,631 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(676): Recovered RegionProcedureStore lease in 0 msec 2024-11-24T02:55:49,632 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(690): Loaded RegionProcedureStore in 0 msec 2024-11-24T02:55:49,632 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.RemoteProcedureDispatcher(96): Instantiated, coreThreads=3 (allowCoreThreadTimeOut=true), queueMaxSize=32, operationDelay=150 2024-11-24T02:55:49,634 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.RegionServerTracker(127): Upgrading RegionServerTracker to active master mode; 0 have existingServerCrashProcedures, 0 possibly 'live' servers, and 0 'splitting'. 2024-11-24T02:55:49,634 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Unable to get data of znode /hbase/balancer because node does not exist (not necessarily an error) 2024-11-24T02:55:49,641 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/balancer already deleted, retry=false 2024-11-24T02:55:49,641 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.SimpleRegionNormalizer(163): Updated configuration for key 'hbase.normalizer.merge.min_region_size.mb' from 0 to 1 2024-11-24T02:55:49,642 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Unable to get data of znode /hbase/normalizer because node does not exist (not necessarily an error) 2024-11-24T02:55:49,651 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/normalizer already deleted, retry=false 2024-11-24T02:55:49,652 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] normalizer.RegionNormalizerWorker(137): Normalizer rate limit set to unlimited 2024-11-24T02:55:49,653 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Unable to get data of znode /hbase/switch/split because node does not exist (not necessarily an error) 2024-11-24T02:55:49,662 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/split already deleted, retry=false 2024-11-24T02:55:49,663 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Unable to get data of znode /hbase/switch/merge because node does not exist (not necessarily an error) 2024-11-24T02:55:49,672 DEBUG 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/switch/merge already deleted, retry=false 2024-11-24T02:55:49,675 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKUtil(444): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Unable to get data of znode /hbase/snapshot-cleanup because node does not exist (not necessarily an error) 2024-11-24T02:55:49,683 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/snapshot-cleanup already deleted, retry=false 2024-11-24T02:55:49,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:49,693 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeCreated, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:49,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,694 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,694 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(856): Active/primary master=7c69a60bd8f6,39871,1732416949314, sessionid=0x1016ac4c7db0000, setting cluster-up flag (Was=false) 2024-11-24T02:55:49,714 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,715 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,746 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/flush-table-proc/acquired, /hbase/flush-table-proc/reached, /hbase/flush-table-proc/abort 2024-11-24T02:55:49,747 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,767 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:49,799 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure.ZKProcedureUtil(251): Clearing all znodes /hbase/online-snapshot/acquired, /hbase/online-snapshot/reached, /hbase/online-snapshot/abort 2024-11-24T02:55:49,800 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] 
procedure.ZKProcedureCoordinator(245): Starting controller for procedure member=7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:49,801 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(1185): No .lastflushedseqids found at hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/.lastflushedseqids will record last flushed sequence id for regions by regionserver report all over again 2024-11-24T02:55:49,803 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.ProcedureExecutor(1139): Stored pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=false; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:49,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.BaseLoadBalancer(416): slop=0.2 2024-11-24T02:55:49,803 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.StochasticLoadBalancer(272): Loaded config; maxSteps=1000000, runMaxSteps=false, stepsPerRegion=800, maxRunningTime=30000, isByTable=false, CostFunctions=[RegionCountSkewCostFunction, PrimaryRegionCountSkewCostFunction, MoveCostFunction, ServerLocalityCostFunction, RackLocalityCostFunction, TableSkewCostFunction, RegionReplicaHostCostFunction, RegionReplicaRackCostFunction, ReadRequestCostFunction, CPRequestCostFunction, WriteRequestCostFunction, MemStoreSizeCostFunction, StoreFileCostFunction] , sum of multiplier of cost functions = 0.0 etc. 2024-11-24T02:55:49,804 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] balancer.RegionHDFSBlockLocationFinder(133): Skipping locality-based refresh due to oldStatus=null, newStatus=Master: 7c69a60bd8f6,39871,1732416949314 Number of backup masters: 0 Number of live region servers: 0 Number of dead region servers: 0 Number of unknown region servers: 0 Average load: 0.0 Number of requests: 0 Number of regions: 0 Number of regions in transition: 0 2024-11-24T02:55:49,805 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_OPEN_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:49,805 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_CLOSE_REGION-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:49,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:49,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_META_SERVER_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=5, maxPoolSize=5 2024-11-24T02:55:49,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=M_LOG_REPLAY_OPS-master/7c69a60bd8f6:0, corePoolSize=10, maxPoolSize=10 2024-11-24T02:55:49,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_SNAPSHOT_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_MERGE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:55:49,806 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] executor.ExecutorService(95): Starting executor service name=MASTER_TABLE_OPERATIONS-master/7c69a60bd8f6:0, corePoolSize=1, 
maxPoolSize=1 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.procedure2.CompletedProcedureCleaner; timeout=30000, timestamp=1732416979807 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): log_cleaner Cleaner pool size is 1 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveLogCleaner 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.replication.master.ReplicationLogCleaner 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreWALCleaner 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveProcedureWALCleaner 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.LogCleaner(148): Creating 1 old WALs cleaner threads 2024-11-24T02:55:49,807 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=LogsCleaner, period=600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,807 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_WRITE_FS_LAYOUT, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:49,808 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(76): BOOTSTRAP: creating hbase:meta region 2024-11-24T02:55:49,808 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.DirScanPool(74): hfile_cleaner Cleaner pool size is 2 2024-11-24T02:55:49,808 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveMasterLocalStoreHFileCleaner 2024-11-24T02:55:49,808 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.HFileLinkCleaner 2024-11-24T02:55:49,808 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.snapshot.SnapshotHFileCleaner 2024-11-24T02:55:49,808 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.CleanerChore(192): Initialize cleaner=org.apache.hadoop.hbase.master.cleaner.TimeToLiveHFileCleaner 2024-11-24T02:55:49,809 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(260): Starting for large file=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416949808,5,FailOnTimeoutGroup] 2024-11-24T02:55:49,809 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] cleaner.HFileCleaner(275): Starting for small files=Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416949809,5,FailOnTimeoutGroup] 2024-11-24T02:55:49,809 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HFileCleaner, period=600000, unit=MILLISECONDS is enabled. 
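The LogsCleaner and HFileCleaner entries above are instances of HBase's generic chore mechanism: a ScheduledChore with a name, period and time unit, registered on a ChoreService, which is what produces the "Chore ScheduledChore name=..., period=..., unit=MILLISECONDS is enabled." lines. A minimal sketch of that pattern, assuming the public ChoreService/ScheduledChore/Stoppable API of recent HBase releases; the chore name and body here are purely illustrative, not the actual cleaner implementation.

import org.apache.hadoop.hbase.ChoreService;
import org.apache.hadoop.hbase.ScheduledChore;
import org.apache.hadoop.hbase.Stoppable;

public class ChoreSketch {
  public static void main(String[] args) throws InterruptedException {
    // Stopper the chore consults to know when to stop running.
    Stoppable stopper = new Stoppable() {
      private volatile boolean stopped;
      @Override public void stop(String why) { stopped = true; }
      @Override public boolean isStopped() { return stopped; }
    };

    // Fires every 600000 ms, like the LogsCleaner chore in the log (default unit is milliseconds).
    ScheduledChore logsCleaner = new ScheduledChore("LogsCleaner", stopper, 600_000) {
      @Override protected void chore() {
        // The real cleaner deletes expired WALs here; this sketch only prints.
        System.out.println("LogsCleaner tick");
      }
    };

    ChoreService choreService = new ChoreService("sketch");  // prefix for the pool's thread names
    choreService.scheduleChore(logsCleaner);                 // -> "... is enabled." in the log

    Thread.sleep(5_000);
    stopper.stop("done");
    choreService.shutdown();
  }
}

The same mechanism backs the region server chores further down in this log (CompactionChecker, MemstoreFlusherChore, HeapMemoryTunerChore, and friends).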
2024-11-24T02:55:49,809 DEBUG [PEWorker-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,809 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1741): Reopening regions with very high storeFileRefCount is disabled. Provide threshold value > 0 for hbase.regions.recovery.store.file.ref.count to enable it. 2024-11-24T02:55:49,809 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationBarrierCleaner, period=43200000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,809 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=SnapshotCleaner, period=1800000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,809 INFO [PEWorker-1 {}] util.FSTableDescriptors(156): Creating new hbase:meta table descriptor 'hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} 2024-11-24T02:55:49,815 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:55:49,816 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741831_1007 (size=1321) 2024-11-24T02:55:49,817 INFO [PEWorker-1 {}] util.FSTableDescriptors(163): Updated hbase:meta table descriptor to hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/.tabledesc/.tableinfo.0000000001.1321 2024-11-24T02:55:49,817 INFO [PEWorker-1 {}] regionserver.HRegion(7572): creating {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''}, tableDescriptor='hbase:meta', {TABLE_ATTRIBUTES => {IS_META => 'true', coprocessor$1 => '|org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint|536870911|', METADATA => {'hbase.store.file-tracker.impl' => 'DEFAULT'}}}, {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS 
=> 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'}, {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'}, regionDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f 2024-11-24T02:55:49,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:55:49,823 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741832_1008 (size=32) 2024-11-24T02:55:49,824 DEBUG [PEWorker-1 {}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:49,826 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:55:49,827 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:55:49,827 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:49,827 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: 
cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:55:49,829 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:55:49,829 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:49,829 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:55:49,830 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:55:49,830 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:49,831 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:55:49,832 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, 
maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:55:49,832 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:49,833 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:49,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:55:49,833 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740 2024-11-24T02:55:49,834 DEBUG [PEWorker-1 {}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740 2024-11-24T02:55:49,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:55:49,835 DEBUG [PEWorker-1 {}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:55:49,835 DEBUG [PEWorker-1 {}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
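The two FlushLargeStoresPolicy entries (32.0 M for the master local store at 02:55:49,621 and 16.0 M for hbase:meta here) both come from the same fallback: when hbase.hregion.percolumnfamilyflush.size.lower.bound is absent from the table descriptor, the per-family flush lower bound defaults to the region's memstore flush size divided by the number of column families. For the master store that is 134217728 / 4 families (info, proc, rs, state) = 33554432, i.e. the flushSizeLowerBound=33554432 logged above; for hbase:meta, 16 MB across its 4 families (info, ns, rep_barrier, table) points at a 64 MB flush size. A small sketch of the arithmetic, plus setting the bound explicitly on a hypothetical table descriptor instead of relying on the fallback, assuming the standard TableDescriptorBuilder client API:

import org.apache.hadoop.hbase.TableName;
import org.apache.hadoop.hbase.client.ColumnFamilyDescriptorBuilder;
import org.apache.hadoop.hbase.client.TableDescriptor;
import org.apache.hadoop.hbase.client.TableDescriptorBuilder;

public class FlushLowerBoundSketch {
  public static void main(String[] args) {
    // Fallback seen in the log: memstore flush size / number of column families.
    long masterStoreFlushSize = 134_217_728L;            // flushSize logged by MasterRegionFlusherAndCompactor
    int families = 4;                                     // info, proc, rs, state
    System.out.println(masterStoreFlushSize / families);  // 33554432 -> the "32.0 M" / flushSizeLowerBound above

    // Pinning the bound explicitly on a hypothetical table, so the fallback is never used.
    TableDescriptor td = TableDescriptorBuilder.newBuilder(TableName.valueOf("demo"))
        .setColumnFamily(ColumnFamilyDescriptorBuilder.of("info"))
        .setValue("hbase.hregion.percolumnfamilyflush.size.lower.bound", "16777216") // 16 MB
        .build();
    System.out.println(td.getValue("hbase.hregion.percolumnfamilyflush.size.lower.bound"));
  }
}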
2024-11-24T02:55:49,836 DEBUG [PEWorker-1 {}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:55:49,838 DEBUG [PEWorker-1 {}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/recovered.edits/1.seqid, newMaxSeqId=1, maxSeqId=-1 2024-11-24T02:55:49,838 INFO [PEWorker-1 {}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=880931, jitterRate=0.12016202509403229}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:55:49,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1006): Region open journal for 1588230740: Writing region info on filesystem at 1732416949824Initializing all the Stores at 1732416949825 (+1 ms)Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416949825Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416949825Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416949825Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416949825Cleaning up temporary data from old regions at 1732416949835 (+10 ms)Region opened successfully at 1732416949839 (+4 ms) 2024-11-24T02:55:49,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:55:49,839 INFO [PEWorker-1 {}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:55:49,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:55:49,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:55:49,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:55:49,839 INFO [PEWorker-1 {}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:49,839 DEBUG [PEWorker-1 {}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416949839Disabling compacts and flushes for region at 1732416949839Disabling writes for close at 1732416949839Writing region close 
event to WAL at 1732416949839Closed at 1732416949839 2024-11-24T02:55:49,840 DEBUG [PEWorker-1 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_ASSIGN_META, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:49,840 INFO [PEWorker-1 {}] procedure.InitMetaProcedure(108): Going to assign meta 2024-11-24T02:55:49,841 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN}] 2024-11-24T02:55:49,842 INFO [PEWorker-2 {}] procedure.MasterProcedureScheduler(851): Took xlock for pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:55:49,842 INFO [PEWorker-2 {}] assignment.TransitRegionStateProcedure(269): Starting pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_GET_ASSIGN_CANDIDATE, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN; state=OFFLINE, location=null; forceNewPlan=false, retain=false 2024-11-24T02:55:49,898 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(746): ClusterId : 4ca5f530-1e74-4edb-a4ed-46e24600ff91 2024-11-24T02:55:49,898 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure flush-table-proc initializing 2024-11-24T02:55:49,905 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure flush-table-proc initialized 2024-11-24T02:55:49,905 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(43): Procedure online-snapshot initializing 2024-11-24T02:55:49,915 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(45): Procedure online-snapshot initialized 2024-11-24T02:55:49,915 DEBUG [RS:0;7c69a60bd8f6:35109 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@10d5934f, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=7c69a60bd8f6/172.17.0.2:0 2024-11-24T02:55:49,930 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.ShutdownHook(81): Installed shutdown hook thread: Shutdownhook:RS:0;7c69a60bd8f6:35109 2024-11-24T02:55:49,930 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.RegionServerCoprocessorHost(66): System coprocessor loading is enabled 2024-11-24T02:55:49,930 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.RegionServerCoprocessorHost(67): Table coprocessor loading is enabled 2024-11-24T02:55:49,930 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(832): About to register with Master. 
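The region server is about to call reportForDuty in the following entry; once the master's ServerManager registers it, the membership becomes visible through the ordinary client Admin API. A minimal client-side sketch, assuming a reachable cluster on the ZooKeeper quorum shown in the log (127.0.0.1:62587) and the Admin.getClusterMetrics() call of HBase 2.x and later; in this test the output would correspond to the "Number of live region servers" status line logged earlier.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.ClusterMetrics;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.ServerName;
import org.apache.hadoop.hbase.client.Admin;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;

public class ClusterStatusSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    conf.set("hbase.zookeeper.quorum", "127.0.0.1");           // quorum taken from the log
    conf.set("hbase.zookeeper.property.clientPort", "62587");

    try (Connection conn = ConnectionFactory.createConnection(conf);
         Admin admin = conn.getAdmin()) {
      ClusterMetrics metrics = admin.getClusterMetrics();
      System.out.println("active master: " + metrics.getMasterName());
      for (ServerName rs : metrics.getLiveServerMetrics().keySet()) {
        // e.g. 7c69a60bd8f6,35109,1732416949481 once registration has completed
        System.out.println("live region server: " + rs);
      }
    }
  }
}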
2024-11-24T02:55:49,931 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(2659): reportForDuty to master=7c69a60bd8f6,39871,1732416949314 with port=35109, startcode=1732416949481 2024-11-24T02:55:49,931 DEBUG [RS:0;7c69a60bd8f6:35109 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=RegionServerStatusService, sasl=false 2024-11-24T02:55:49,933 INFO [HMaster-EventLoopGroup-16-2 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:34343, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins.hfs.7 (auth:SIMPLE), service=RegionServerStatusService 2024-11-24T02:55:49,934 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39871 {}] master.ServerManager(363): Checking decommissioned status of RegionServer 7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:49,934 INFO [RpcServer.priority.RWQ.Fifo.write.handler=0,queue=0,port=39871 {}] master.ServerManager(517): Registering regionserver=7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:49,935 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1440): Config from master: hbase.rootdir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f 2024-11-24T02:55:49,935 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1440): Config from master: fs.defaultFS=hdfs://localhost:36367 2024-11-24T02:55:49,935 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1440): Config from master: hbase.master.info.port=-1 2024-11-24T02:55:49,946 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:55:49,946 DEBUG [RS:0;7c69a60bd8f6:35109 {}] zookeeper.ZKUtil(111): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on existing znode=/hbase/rs/7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:49,947 WARN [RS:0;7c69a60bd8f6:35109 {}] hbase.ZNodeClearer(65): Environment variable HBASE_ZNODE_FILE not set; znodes will not be cleared on crash by start scripts (Longer MTTR!) 2024-11-24T02:55:49,947 INFO [RS:0;7c69a60bd8f6:35109 {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:49,947 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1793): logDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:49,947 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(179): RegionServer ephemeral node created, adding [7c69a60bd8f6,35109,1732416949481] 2024-11-24T02:55:49,950 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.MetricsRegionServerWrapperImpl(120): Computing regionserver metrics every 5000 milliseconds 2024-11-24T02:55:49,952 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.MemStoreFlusher(131): globalMemStoreLimit=880 M, globalMemStoreLimitLowMark=836 M, Offheap=false 2024-11-24T02:55:49,953 INFO [RS:0;7c69a60bd8f6:35109 {}] throttle.PressureAwareCompactionThroughputController(131): Compaction throughput configurations, higher bound: 100.00 MB/second, lower bound 50.00 MB/second, off peak: unlimited, tuning period: 60000 ms 2024-11-24T02:55:49,953 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS is enabled. 
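The ZKUtil and RegionServerTracker entries above show where the registration actually lives: the region server creates an ephemeral child of /hbase/rs, and the master's RegionServerTracker watches that path for membership changes. The same data can be read with the plain ZooKeeper client; a minimal read-only sketch against the quorum from the log (the session timeout is an arbitrary choice here):

import java.util.List;
import org.apache.zookeeper.WatchedEvent;
import org.apache.zookeeper.ZooKeeper;

public class RsZNodeSketch {
  public static void main(String[] args) throws Exception {
    // Quorum and znode layout taken from the log above; this only reads, it does not modify anything.
    ZooKeeper zk = new ZooKeeper("127.0.0.1:62587", 30_000, (WatchedEvent e) ->
        System.out.println("event: " + e.getType() + " on " + e.getPath()));

    // The region server registers itself as an ephemeral child of /hbase/rs;
    // passing true also sets a watch, like RegionServerTracker does on the master.
    List<String> servers = zk.getChildren("/hbase/rs", true);
    servers.forEach(s -> System.out.println("registered: " + s)); // e.g. 7c69a60bd8f6,35109,1732416949481

    zk.close();
  }
}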
2024-11-24T02:55:49,953 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer$CompactionChecker(1680): CompactionChecker runs every PT1S 2024-11-24T02:55:49,954 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ExecutorStatusChore(48): ExecutorStatusChore runs every 1mins, 0sec 2024-11-24T02:55:49,954 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactedHFilesCleaner, period=120000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_OPEN_PRIORITY_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_REGION-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLOSE_META-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_LOG_REPLAY_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=2, maxPoolSize=2 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_COMPACTED_FILES_DISCHARGER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_REGION_REPLICA_FLUSH_OPS-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_REFRESH_PEER-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,954 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_REPLAY_SYNC_REPLICATION_WAL-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,955 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_SWITCH_RPC_THROTTLE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,955 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_CLAIM_REPLICATION_QUEUE-regionserver/7c69a60bd8f6:0, corePoolSize=1, maxPoolSize=1 2024-11-24T02:55:49,955 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_SNAPSHOT_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:55:49,955 DEBUG [RS:0;7c69a60bd8f6:35109 {}] executor.ExecutorService(95): Starting executor service name=RS_FLUSH_OPERATIONS-regionserver/7c69a60bd8f6:0, corePoolSize=3, maxPoolSize=3 2024-11-24T02:55:49,955 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=CompactionChecker, period=1000, unit=MILLISECONDS is enabled. 
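Each "Starting executor service name=..., corePoolSize=..., maxPoolSize=..." entry, both the MASTER_* pools earlier and the RS_* pools here, is a small dedicated thread pool per event type (open region, close region, log replay, and so on). Conceptually it amounts to a bounded ThreadPoolExecutor with a named thread factory; a JDK-only sketch of the same shape, with the sizes mirroring the RS_OPEN_REGION entry and everything else purely illustrative:

import java.util.concurrent.LinkedBlockingQueue;
import java.util.concurrent.ThreadPoolExecutor;
import java.util.concurrent.TimeUnit;

public class ExecutorSketch {
  public static void main(String[] args) {
    // corePoolSize=1, maxPoolSize=1, mirroring the RS_OPEN_REGION pool in the log.
    ThreadPoolExecutor openRegionPool = new ThreadPoolExecutor(
        1, 1, 60, TimeUnit.SECONDS, new LinkedBlockingQueue<>(),
        r -> new Thread(r, "RS_OPEN_REGION-sketch"));

    openRegionPool.submit(() -> System.out.println("would handle an open-region event here"));
    openRegionPool.shutdown();
  }
}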
2024-11-24T02:55:49,955 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=MemstoreFlusherChore, period=1000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,955 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ExecutorStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,955 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=nonceCleaner, period=360000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,955 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=BrokenStoreFileCleaner, period=21600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,955 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35109,1732416949481-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:55:49,972 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HeapMemoryManager(209): Starting, tuneOn=false 2024-11-24T02:55:49,972 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,35109,1732416949481-HeapMemoryTunerChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,972 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,972 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.Replication(171): 7c69a60bd8f6,35109,1732416949481 started 2024-11-24T02:55:49,987 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(168): Chore ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:49,987 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1482): Serving as 7c69a60bd8f6,35109,1732416949481, RpcServer on 7c69a60bd8f6/172.17.0.2:35109, sessionid=0x1016ac4c7db0001 2024-11-24T02:55:49,987 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure flush-table-proc starting 2024-11-24T02:55:49,987 DEBUG [RS:0;7c69a60bd8f6:35109 {}] flush.RegionServerFlushTableProcedureManager(108): Start region server flush procedure manager 7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:49,987 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,35109,1732416949481' 2024-11-24T02:55:49,987 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/flush-table-proc/abort' 2024-11-24T02:55:49,988 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/flush-table-proc/acquired' 2024-11-24T02:55:49,988 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure flush-table-proc started 2024-11-24T02:55:49,988 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(51): Procedure online-snapshot starting 2024-11-24T02:55:49,988 DEBUG [RS:0;7c69a60bd8f6:35109 {}] snapshot.RegionServerSnapshotManager(126): Start Snapshot Manager 7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:49,988 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.ZKProcedureMemberRpcs(357): Starting procedure member '7c69a60bd8f6,35109,1732416949481' 2024-11-24T02:55:49,988 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.ZKProcedureMemberRpcs(134): Checking for aborted procedures on node: '/hbase/online-snapshot/abort' 2024-11-24T02:55:49,988 DEBUG 
[RS:0;7c69a60bd8f6:35109 {}] procedure.ZKProcedureMemberRpcs(154): Looking for new procedures under znode:'/hbase/online-snapshot/acquired' 2024-11-24T02:55:49,989 DEBUG [RS:0;7c69a60bd8f6:35109 {}] procedure.RegionServerProcedureManagerHost(53): Procedure online-snapshot started 2024-11-24T02:55:49,989 INFO [RS:0;7c69a60bd8f6:35109 {}] quotas.RegionServerRpcQuotaManager(64): Quota support disabled 2024-11-24T02:55:49,989 INFO [RS:0;7c69a60bd8f6:35109 {}] quotas.RegionServerSpaceQuotaManager(80): Quota support disabled, not starting space quota manager. 2024-11-24T02:55:49,992 WARN [7c69a60bd8f6:39871 {}] assignment.AssignmentManager(2451): No servers available; cannot place 1 unassigned regions. 2024-11-24T02:55:50,091 INFO [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C35109%2C1732416949481, suffix=, logDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/7c69a60bd8f6,35109,1732416949481, archiveDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs, maxLogs=32 2024-11-24T02:55:50,092 INFO [RS:0;7c69a60bd8f6:35109 {}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C35109%2C1732416949481.1732416950092 2024-11-24T02:55:50,100 INFO [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/7c69a60bd8f6,35109,1732416949481/7c69a60bd8f6%2C35109%2C1732416949481.1732416950092 2024-11-24T02:55:50,101 DEBUG [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36739:36739),(127.0.0.1/127.0.0.1:45813:45813)] 2024-11-24T02:55:50,243 DEBUG [7c69a60bd8f6:39871 {}] assignment.AssignmentManager(2472): Processing assignQueue; systemServersCount=1, allServersCount=1 2024-11-24T02:55:50,243 INFO [PEWorker-3 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPENING, regionLocation=7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:50,245 INFO [PEWorker-3 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,35109,1732416949481, state=OPENING 2024-11-24T02:55:50,315 DEBUG [PEWorker-3 {}] zookeeper.MetaTableLocator(183): hbase:meta region location doesn't exist, create it 2024-11-24T02:55:50,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:50,325 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:50,326 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:50,326 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:50,326 DEBUG [PEWorker-3 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=2, ppid=1, state=RUNNABLE:REGION_STATE_TRANSITION_OPEN, hasLock=true; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN 2024-11-24T02:55:50,326 INFO 
[PEWorker-3 {}] procedure2.ProcedureExecutor(1860): Initialized subprocedures=[{pid=3, ppid=2, state=RUNNABLE, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,35109,1732416949481}] 2024-11-24T02:55:50,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,34067,1732416765209/7c69a60bd8f6%2C34067%2C1732416765209.meta.1732416766765.meta java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:50,395 WARN [Close-WAL-Writer-0 {}] util.RecoverLeaseFSUtils(258): Failed invocation for hdfs://localhost:46305/user/jenkins/test-data/232322a7-be11-4e49-f4b5-7da9069d1aad/WALs/7c69a60bd8f6,44053,1732416766982/7c69a60bd8f6%2C44053%2C1732416766982.1732416767280 java.lang.reflect.InvocationTargetException: null at jdk.internal.reflect.GeneratedMethodAccessor117.invoke(Unknown Source) ~[?:?] at jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) ~[?:?] at java.lang.reflect.Method.invoke(Method.java:568) ~[?:?] 
at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.isFileClosed(RecoverLeaseFSUtils.java:254) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverDFSFileLease(RecoverLeaseFSUtils.java:186) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.util.RecoverLeaseFSUtils.recoverFileLease(RecoverLeaseFSUtils.java:96) ~[hbase-asyncfs-4.0.0-alpha-1-SNAPSHOT.jar:4.0.0-alpha-1-SNAPSHOT] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.recoverLease(AbstractFSWAL.java:2031) ~[classes/:?] at org.apache.hadoop.hbase.regionserver.wal.AbstractFSWAL.lambda$closeWriter$17(AbstractFSWAL.java:2044) ~[classes/:?] at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1136) ~[?:?] at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:635) ~[?:?] at java.lang.Thread.run(Thread.java:840) ~[?:?] Caused by: java.io.IOException: Filesystem closed at org.apache.hadoop.hdfs.DFSClient.checkOpen(DFSClient.java:490) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DFSClient.isFileClosed(DFSClient.java:1808) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2539) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem$46.doCall(DistributedFileSystem.java:2536) ~[hadoop-hdfs-client-3.4.1.jar:?] at org.apache.hadoop.fs.FileSystemLinkResolver.resolve(FileSystemLinkResolver.java:81) ~[hadoop-common-3.4.1.jar:?] at org.apache.hadoop.hdfs.DistributedFileSystem.isFileClosed(DistributedFileSystem.java:2554) ~[hadoop-hdfs-client-3.4.1.jar:?] ... 11 more 2024-11-24T02:55:50,478 DEBUG [RSProcedureDispatcher-pool-0 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=AdminService, sasl=false 2024-11-24T02:55:50,480 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:41181, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=AdminService 2024-11-24T02:55:50,484 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(132): Open hbase:meta,,1.1588230740 2024-11-24T02:55:50,484 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:50,485 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=7c69a60bd8f6%2C35109%2C1732416949481.meta, suffix=.meta, logDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/7c69a60bd8f6,35109,1732416949481, archiveDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs, maxLogs=32 2024-11-24T02:55:50,486 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] monitor.StreamSlowMonitor(122): New stream slow monitor 7c69a60bd8f6%2C35109%2C1732416949481.meta.1732416950486.meta 2024-11-24T02:55:50,503 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/7c69a60bd8f6,35109,1732416949481/7c69a60bd8f6%2C35109%2C1732416949481.meta.1732416950486.meta 2024-11-24T02:55:50,511 DEBUG 
[RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36739:36739),(127.0.0.1/127.0.0.1:45813:45813)] 2024-11-24T02:55:50,517 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7752): Opening region: {ENCODED => 1588230740, NAME => 'hbase:meta,,1', STARTKEY => '', ENDKEY => ''} 2024-11-24T02:55:50,517 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] coprocessor.CoprocessorHost(215): Loading coprocessor class org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint with path null and priority 536870911 2024-11-24T02:55:50,517 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(8280): Registered coprocessor service: region=hbase:meta,,1 service=MultiRowMutationService 2024-11-24T02:55:50,517 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.RegionCoprocessorHost(434): Loaded coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint from HTD of hbase:meta successfully. 2024-11-24T02:55:50,518 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.MetricsRegionSourceImpl(79): Creating new MetricsRegionSourceImpl for table meta 1588230740 2024-11-24T02:55:50,518 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(898): Instantiated hbase:meta,,1.1588230740; StoreHotnessProtector, parallelPutToStoreThreadLimit=0 ; minColumnNum=100 ; preparePutThreadLimit=0 ; hotProtect now disable 2024-11-24T02:55:50,518 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7794): checking encryption for 1588230740 2024-11-24T02:55:50,518 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(7797): checking classloading for 1588230740 2024-11-24T02:55:50,519 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family info of region 1588230740 2024-11-24T02:55:50,519 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName info 2024-11-24T02:55:50,519 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:50,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/info, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, 
parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:50,520 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family ns of region 1588230740 2024-11-24T02:55:50,520 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName ns 2024-11-24T02:55:50,520 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:50,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/ns, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:50,521 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family rep_barrier of region 1588230740 2024-11-24T02:55:50,521 INFO [StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName rep_barrier 2024-11-24T02:55:50,521 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:50,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/rep_barrier, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:50,522 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(400): Created cacheConfig: cacheDataOnRead=true, cacheDataOnWrite=false, cacheIndexesOnWrite=false, cacheBloomsOnWrite=false, cacheEvictOnClose=false, cacheDataCompressed=false, prefetchOnOpen=false, for column family table of region 1588230740 2024-11-24T02:55:50,522 INFO 
[StoreOpener-1588230740-1 {}] compactions.CompactionConfiguration(183): size [minCompactSize:128 MB, maxCompactSize:8.00 EB, offPeakMaxCompactSize:8.00 EB); files [minFilesToCompact:3, maxFilesToCompact:10); ratio 1.200000; off-peak ratio 5.000000; throttle point 2684354560; major period 604800000, major jitter 0.500000, min locality to compact 0.000000; tiered compaction: max_age 9223372036854775807, incoming window min 6, compaction policy for tiered window org.apache.hadoop.hbase.regionserver.compactions.ExploringCompactionPolicy, single output for minor true, compaction window factory org.apache.hadoop.hbase.regionserver.compactions.ExponentialCompactionWindowFactory, region 1588230740 columnFamilyName table 2024-11-24T02:55:50,522 DEBUG [StoreOpener-1588230740-1 {}] storefiletracker.StoreFileTrackerFactory(122): instantiating StoreFileTracker impl org.apache.hadoop.hbase.regionserver.storefiletracker.DefaultStoreFileTracker 2024-11-24T02:55:50,523 INFO [StoreOpener-1588230740-1 {}] regionserver.HStore(327): Store=1588230740/table, memstore type=DefaultMemStore, storagePolicy=NONE, verifyBulkLoads=false, parallelPutCountPrintThreshold=50, encoding=ROW_INDEX_V1, compression=NONE 2024-11-24T02:55:50,523 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1038): replaying wal for 1588230740 2024-11-24T02:55:50,524 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740 2024-11-24T02:55:50,524 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(5546): Found 0 recovered edits file(s) under hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740 2024-11-24T02:55:50,525 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1048): stopping wal replay for 1588230740 2024-11-24T02:55:50,525 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1060): Cleaning up temporary data for 1588230740 2024-11-24T02:55:50,526 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.FlushLargeStoresPolicy(65): No hbase.hregion.percolumnfamilyflush.size.lower.bound set in table hbase:meta descriptor;using region.getMemStoreFlushHeapSize/# of families (16.0 M)) instead. 
2024-11-24T02:55:50,527 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1093): writing seq id for 1588230740 2024-11-24T02:55:50,527 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1114): Opened 1588230740; next sequenceid=2; SteppingSplitPolicysuper{IncreasingToUpperBoundRegionSplitPolicy{initialSize=16384, ConstantSizeRegionSplitPolicy{desiredMaxFileSize=845147, jitterRate=0.07466082274913788}}}, FlushLargeStoresPolicy{flushSizeLowerBound=16777216} 2024-11-24T02:55:50,527 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1122): Running coprocessor post-open hooks for 1588230740 2024-11-24T02:55:50,528 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegion(1006): Region open journal for 1588230740: Running coprocessor pre-open hook at 1732416950518Writing region info on filesystem at 1732416950518Initializing all the Stores at 1732416950518Instantiating store for column family {NAME => 'info', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416950518Instantiating store for column family {NAME => 'ns', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416950519 (+1 ms)Instantiating store for column family {NAME => 'rep_barrier', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '2147483647', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '65536 B (64KB)'} at 1732416950519Instantiating store for column family {NAME => 'table', INDEX_BLOCK_ENCODING => 'NONE', VERSIONS => '3', KEEP_DELETED_CELLS => 'FALSE', DATA_BLOCK_ENCODING => 'ROW_INDEX_V1', TTL => 'FOREVER', MIN_VERSIONS => '0', REPLICATION_SCOPE => '0', BLOOMFILTER => 'ROWCOL', IN_MEMORY => 'true', COMPRESSION => 'NONE', BLOCKCACHE => 'true', BLOCKSIZE => '8192 B (8KB)'} at 1732416950519Cleaning up temporary data from old regions at 1732416950525 (+6 ms)Running coprocessor post-open hooks at 1732416950527 (+2 ms)Region opened successfully at 1732416950528 (+1 ms) 2024-11-24T02:55:50,528 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2236): Post open deploy tasks for hbase:meta,,1.1588230740, pid=3, masterSystemTime=1732416950478 2024-11-24T02:55:50,530 DEBUG [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] regionserver.HRegionServer(2266): Finished post open deploy task for hbase:meta,,1.1588230740 2024-11-24T02:55:50,530 INFO [RS_OPEN_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_OPEN_META, pid=3}] handler.AssignRegionHandler(153): Opened hbase:meta,,1.1588230740 2024-11-24T02:55:50,531 INFO [PEWorker-5 {}] assignment.RegionStateStore(223): pid=2 updating hbase:meta row=1588230740, regionState=OPEN, openSeqNum=2, 
regionLocation=7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:50,532 INFO [PEWorker-5 {}] zookeeper.MetaTableLocator(171): Setting hbase:meta replicaId=0 location in ZooKeeper as 7c69a60bd8f6,35109,1732416949481, state=OPEN 2024-11-24T02:55:50,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:55:50,568 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDataChanged, state=SyncConnected, path=/hbase/meta-region-server 2024-11-24T02:55:50,569 DEBUG [PEWorker-5 {}] procedure2.ProcedureFutureUtil(75): The future has completed while adding callback, give up suspending procedure pid=3, ppid=2, state=RUNNABLE, hasLock=true; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:50,569 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:50,569 DEBUG [zk-event-processor-pool-0 {}] hbase.MetaRegionLocationCache(167): Updating meta znode for path /hbase/meta-region-server: CHANGED 2024-11-24T02:55:50,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=3, resume processing ppid=2 2024-11-24T02:55:50,571 INFO [PEWorker-5 {}] procedure2.ProcedureExecutor(1521): Finished pid=3, ppid=2, state=SUCCESS, hasLock=false; OpenRegionProcedure 1588230740, server=7c69a60bd8f6,35109,1732416949481 in 243 msec 2024-11-24T02:55:50,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(2017): Finished subprocedure pid=2, resume processing ppid=1 2024-11-24T02:55:50,573 INFO [PEWorker-1 {}] procedure2.ProcedureExecutor(1521): Finished pid=2, ppid=1, state=SUCCESS, hasLock=false; TransitRegionStateProcedure table=hbase:meta, region=1588230740, ASSIGN in 730 msec 2024-11-24T02:55:50,574 DEBUG [PEWorker-2 {}] procedure.InitMetaProcedure(97): Execute pid=1, state=RUNNABLE:INIT_META_CREATE_NAMESPACES, hasLock=true; InitMetaProcedure table=hbase:meta 2024-11-24T02:55:50,574 INFO [PEWorker-2 {}] procedure.InitMetaProcedure(114): Going to create {NAME => 'default'} and {NAME => 'hbase'} namespaces 2024-11-24T02:55:50,575 DEBUG [PEWorker-2 {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:55:50,575 DEBUG [PEWorker-2 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,35109,1732416949481, seqNum=-1] 2024-11-24T02:55:50,575 DEBUG [PEWorker-2 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:55:50,576 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-3 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:60465, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:55:50,582 INFO [PEWorker-2 {}] procedure2.ProcedureExecutor(1521): Finished pid=1, state=SUCCESS, hasLock=false; InitMetaProcedure table=hbase:meta in 778 msec 2024-11-24T02:55:50,582 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1123): Wait for region servers to report in: status=status unset, state=RUNNING, startTime=1732416950582, completionTime=-1 2024-11-24T02:55:50,582 INFO 
[master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ServerManager(903): Finished waiting on RegionServer count=1; waited=0ms, expected min=1 server(s), max=1 server(s), master is running 2024-11-24T02:55:50,582 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1764): Joining cluster... 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1776): Number of RegionServers=1 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$RegionInTransitionChore; timeout=60000, timestamp=1732417010585 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] procedure2.TimeoutExecutorThread(82): ADDED pid=-1, state=WAITING_TIMEOUT, hasLock=false; org.apache.hadoop.hbase.master.assignment.AssignmentManager$DeadServerMetricRegionChore; timeout=120000, timestamp=1732417070585 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] assignment.AssignmentManager(1783): Joined the cluster in 2 msec 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,39871,1732416949314-ClusterStatusChore, period=60000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,39871,1732416949314-BalancerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,585 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,39871,1732416949314-RegionNormalizerChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,586 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=CatalogJanitor-7c69a60bd8f6:39871, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,586 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=HbckChore-, period=3600000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,586 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,588 DEBUG [master/7c69a60bd8f6:0.Chore.1 {}] janitor.CatalogJanitor(180): 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1239): Master has completed initialization 1.044sec 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] quotas.MasterQuotaManager(97): Quota support disabled 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] slowlog.SlowLogMasterService(57): Slow/Large requests logging to system table hbase:slowlog is disabled. Quitting. 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] waleventtracker.WALEventTrackerTableCreator(75): wal event tracker requests logging to table REPLICATION.WALEVENTTRACKER is disabled. Quitting. 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.ReplicationSinkTrackerTableCreator(90): replication sink tracker requests logging to table REPLICATION.SINK_TRACKER is disabled. Quitting. 
2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] zookeeper.ZKWatcher(271): not a secure deployment, proceeding 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,39871,1732416949314-MobFileCleanerChore, period=86400, unit=SECONDS is enabled. 2024-11-24T02:55:50,590 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,39871,1732416949314-MobFileCompactionChore, period=604800, unit=SECONDS is enabled. 2024-11-24T02:55:50,592 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster {}] master.HMaster(1374): Balancer post startup initialization complete, took 0 seconds 2024-11-24T02:55:50,592 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=RollingUpgradeChore, period=10, unit=SECONDS is enabled. 2024-11-24T02:55:50,592 INFO [master/7c69a60bd8f6:0:becomeActiveMaster {}] hbase.ChoreService(168): Chore ScheduledChore name=7c69a60bd8f6,39871,1732416949314-OldWALsDirSizeChore, period=300000, unit=MILLISECONDS is enabled. 2024-11-24T02:55:50,599 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@7c62a047, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:55:50,599 DEBUG [Time-limited test {}] client.ClusterIdFetcher(90): Going to request 7c69a60bd8f6,39871,-1 for getting cluster id 2024-11-24T02:55:50,599 DEBUG [Time-limited test {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ConnectionRegistryService, sasl=false 2024-11-24T02:55:50,601 DEBUG [HMaster-EventLoopGroup-16-3 {}] ipc.ServerRpcConnection(714): Response connection registry, clusterId = '4ca5f530-1e74-4edb-a4ed-46e24600ff91' 2024-11-24T02:55:50,601 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(424): process preamble call response with response type GetConnectionRegistryResponse 2024-11-24T02:55:50,601 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ClusterIdFetcher$1(103): Got connection registry info: cluster_id: "4ca5f530-1e74-4edb-a4ed-46e24600ff91" 2024-11-24T02:55:50,601 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@221fc9ad, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:55:50,601 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] client.ConnectionRegistryRpcStubHolder(93): Going to use new servers to create stubs: [7c69a60bd8f6,39871,-1] 2024-11-24T02:55:50,602 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientMetaService, sasl=false 2024-11-24T02:55:50,602 DEBUG [RPCClient-NioEventLoopGroup-4-8 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:50,603 INFO [HMaster-EventLoopGroup-16-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:50560, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientMetaService 2024-11-24T02:55:50,603 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(198): Codec=org.apache.hadoop.hbase.codec.KeyValueCodec@46ae05a3, compressor=null, tcpKeepAlive=true, tcpNoDelay=true, connectTO=10000, readTO=20000, writeTO=60000, 
minIdleTimeBeforeClose=120000, maxRetries=0, fallbackAllowed=true, bind address=null 2024-11-24T02:55:50,604 DEBUG [Time-limited test {}] client.ConnectionUtils(547): Start fetching meta region location from registry 2024-11-24T02:55:50,605 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] client.ConnectionUtils(555): The fetched meta region location is [region=hbase:meta,,1.1588230740, hostname=7c69a60bd8f6,35109,1732416949481, seqNum=-1] 2024-11-24T02:55:50,605 DEBUG [RPCClient-NioEventLoopGroup-4-9 {}] ipc.RpcConnection(159): Using SIMPLE authentication for service=ClientService, sasl=false 2024-11-24T02:55:50,606 INFO [MiniHBaseClusterRegionServer-EventLoopGroup-17-1 {}] ipc.ServerRpcConnection(484): Connection from 172.17.0.2:47642, version=4.0.0-alpha-1-SNAPSHOT, sasl=false, ugi=jenkins (auth:SIMPLE), service=ClientService 2024-11-24T02:55:50,607 INFO [Time-limited test {}] hbase.HBaseTestingUtil(877): Minicluster is up; activeMaster=7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:50,608 INFO [Time-limited test {}] fs.HFileSystem(339): Added intercepting call to namenode#getBlockLocations so can do block reordering using class org.apache.hadoop.hbase.fs.HFileSystem$ReorderWALBlocks 2024-11-24T02:55:50,610 INFO [Time-limited test {}] master.MasterRpcServices(567): Client=null/null set balanceSwitch=false 2024-11-24T02:55:50,610 INFO [Time-limited test {}] wal.WALFactory(196): Instantiating WALProvider of type class org.apache.hadoop.hbase.wal.FSHLogProvider 2024-11-24T02:55:50,612 INFO [Time-limited test {}] wal.AbstractFSWAL(613): WAL configuration: blocksize=256 MB, rollsize=128 MB, prefix=test.com%2C8080%2C1, suffix=, logDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/test.com,8080,1, archiveDir=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs, maxLogs=32 2024-11-24T02:55:50,613 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732416950613 2024-11-24T02:55:50,618 INFO [Time-limited test {}] wal.AbstractFSWAL(991): New WAL /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/test.com,8080,1/test.com%2C8080%2C1.1732416950613 2024-11-24T02:55:50,619 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:45813:45813),(127.0.0.1/127.0.0.1:36739:36739)] 2024-11-24T02:55:50,620 INFO [Time-limited test {}] monitor.StreamSlowMonitor(122): New stream slow monitor test.com%2C8080%2C1.1732416950619 2024-11-24T02:55:50,625 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,625 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,625 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,625 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,625 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,625 INFO [Time-limited test {}] wal.AbstractFSWAL(987): Rolled WAL /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/test.com,8080,1/test.com%2C8080%2C1.1732416950613 with entries=0, filesize=85 B; new WAL /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/test.com,8080,1/test.com%2C8080%2C1.1732416950619 2024-11-24T02:55:50,626 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1109): Create new FSHLog writer with pipeline: [(127.0.0.1/127.0.0.1:36739:36739),(127.0.0.1/127.0.0.1:45813:45813)] 2024-11-24T02:55:50,626 DEBUG [Time-limited test {}] 
wal.AbstractFSWAL(879): hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/test.com,8080,1/test.com%2C8080%2C1.1732416950613 is not closed yet, will try archiving it next time 2024-11-24T02:55:50,627 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741835_1011 (size=93) 2024-11-24T02:55:50,627 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,627 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741835_1011 (size=93) 2024-11-24T02:55:50,627 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,627 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,628 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,628 INFO [WAL-Archive-0 {}] wal.AbstractFSWAL(968): Archiving hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/WALs/test.com,8080,1/test.com%2C8080%2C1.1732416950613 to hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs/test.com%2C8080%2C1.1732416950613 2024-11-24T02:55:50,629 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741836_1012 (size=93) 2024-11-24T02:55:50,630 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741836_1012 (size=93) 2024-11-24T02:55:50,631 DEBUG [Time-limited test {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs 2024-11-24T02:55:50,632 INFO [Time-limited test {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog test.com%2C8080%2C1:(num 1732416950619) 2024-11-24T02:55:50,632 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1019): Shutting down minicluster 2024-11-24T02:55:50,632 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 
2024-11-24T02:55:50,632 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hbase.thirdparty.com.google.common.io.Closeables.close(Closeables.java:79) at org.apache.hadoop.hbase.HBaseTestingUtil.closeConnection(HBaseTestingUtil.java:2611) at org.apache.hadoop.hbase.HBaseTestingUtil.cleanup(HBaseTestingUtil.java:1065) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1034) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:50,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:50,632 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:50,632 INFO [Registry-endpoints-refresh-end-points {}] client.RegistryEndpointsRefresher(78): Registry end points refresher loop exited. 
2024-11-24T02:55:50,632 DEBUG [Time-limited test {}] util.JVMClusterUtil(237): Shutting down HBase Cluster 2024-11-24T02:55:50,632 DEBUG [Time-limited test {}] util.JVMClusterUtil(257): Found active master hash=137211909, stopped=false 2024-11-24T02:55:50,632 INFO [Time-limited test {}] master.ServerManager(983): Cluster shutdown requested of master=7c69a60bd8f6,39871,1732416949314 2024-11-24T02:55:50,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:50,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/running 2024-11-24T02:55:50,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:50,651 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:50,651 INFO [Time-limited test {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:55:50,652 INFO [Time-limited test {}] client.AsyncConnectionImpl(233): Connection has been closed by Time-limited test. 2024-11-24T02:55:50,652 DEBUG [Time-limited test {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.master.HMaster.lambda$shutdown$17(HMaster.java:3306) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.master.HMaster.shutdown(HMaster.java:3277) at org.apache.hadoop.hbase.util.JVMClusterUtil.shutdown(JVMClusterUtil.java:265) at org.apache.hadoop.hbase.LocalHBaseCluster.shutdown(LocalHBaseCluster.java:416) at org.apache.hadoop.hbase.SingleProcessHBaseCluster.shutdown(SingleProcessHBaseCluster.java:676) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniHBaseCluster(HBaseTestingUtil.java:1036) at org.apache.hadoop.hbase.HBaseTestingUtil.shutdownMiniCluster(HBaseTestingUtil.java:1020) at org.apache.hadoop.hbase.regionserver.wal.AbstractTestLogRolling.tearDown(AbstractTestLogRolling.java:163) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.junit.runners.model.FrameworkMethod$1.runReflectiveCall(FrameworkMethod.java:59) at org.junit.internal.runners.model.ReflectiveCallable.run(ReflectiveCallable.java:12) at 
org.junit.runners.model.FrameworkMethod.invokeExplosively(FrameworkMethod.java:56) at org.junit.internal.runners.statements.RunAfters.invokeMethod(RunAfters.java:46) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:33) at org.junit.rules.TestWatcher$1.evaluate(TestWatcher.java:61) at org.junit.runners.ParentRunner$3.evaluate(ParentRunner.java:306) at org.junit.runners.BlockJUnit4ClassRunner$1.evaluate(BlockJUnit4ClassRunner.java:100) at org.junit.runners.ParentRunner.runLeaf(ParentRunner.java:366) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:103) at org.junit.runners.BlockJUnit4ClassRunner.runChild(BlockJUnit4ClassRunner.java:63) at org.junit.runners.ParentRunner$4.run(ParentRunner.java:331) at org.junit.runners.ParentRunner$1.schedule(ParentRunner.java:79) at org.junit.runners.ParentRunner.runChildren(ParentRunner.java:329) at org.junit.runners.ParentRunner.access$100(ParentRunner.java:66) at org.junit.runners.ParentRunner$2.evaluate(ParentRunner.java:293) at org.junit.internal.runners.statements.RunBefores.evaluate(RunBefores.java:26) at org.junit.internal.runners.statements.RunAfters.evaluate(RunAfters.java:27) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:299) at org.junit.internal.runners.statements.FailOnTimeout$CallableStatement.call(FailOnTimeout.java:293) at java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:50,652 DEBUG [Time-limited test {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:50,652 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:50,652 INFO [Time-limited test {}] regionserver.HRegionServer(2196): ***** STOPPING region server '7c69a60bd8f6,35109,1732416949481' ***** 2024-11-24T02:55:50,652 DEBUG [zk-event-processor-pool-0 {}] zookeeper.ZKUtil(113): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Set watcher on znode that does not yet exist, /hbase/running 2024-11-24T02:55:50,652 INFO [Time-limited test {}] regionserver.HRegionServer(2210): STOPPED: Shutdown requested 2024-11-24T02:55:50,652 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HeapMemoryManager(220): Stopping 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] flush.RegionServerFlushTableProcedureManager(119): Stopping region server flush procedure manager gracefully. 2024-11-24T02:55:50,653 INFO [MemStoreFlusher.0 {}] regionserver.MemStoreFlusher$FlushHandler(374): MemStoreFlusher.0 exiting 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] snapshot.RegionServerSnapshotManager(137): Stopping RegionServerSnapshotManager gracefully. 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(959): stopping server 7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] client.AsyncConnectionImpl(233): Connection has been closed by RS:0;7c69a60bd8f6:35109. 
2024-11-24T02:55:50,653 DEBUG [RS:0;7c69a60bd8f6:35109 {}] client.AsyncConnectionImpl(264): Call stack: at java.base/java.lang.Thread.getStackTrace(Thread.java:1619) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.lambda$close$5(AsyncConnectionImpl.java:235) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:187) at org.apache.hadoop.hbase.trace.TraceUtil.trace(TraceUtil.java:177) at org.apache.hadoop.hbase.client.AsyncConnectionImpl.close(AsyncConnectionImpl.java:229) at org.apache.hadoop.hbase.HBaseServerBase.closeClusterConnection(HBaseServerBase.java:457) at org.apache.hadoop.hbase.regionserver.HRegionServer.run(HRegionServer.java:962) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.runRegionServer(SingleProcessHBaseCluster.java:171) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer$1.run(SingleProcessHBaseCluster.java:155) at java.base/java.security.AccessController.doPrivileged(AccessController.java:399) at java.base/javax.security.auth.Subject.doAs(Subject.java:376) at org.apache.hadoop.security.UserGroupInformation.doAs(UserGroupInformation.java:1930) at org.apache.hadoop.hbase.security.User$SecureHadoopUser.runAs(User.java:322) at org.apache.hadoop.hbase.SingleProcessHBaseCluster$MiniHBaseClusterRegionServer.run(SingleProcessHBaseCluster.java:152) at java.base/java.lang.Thread.run(Thread.java:840) 2024-11-24T02:55:50,653 DEBUG [RS:0;7c69a60bd8f6:35109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.CompactSplit(469): Waiting for Split Thread to finish... 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.CompactSplit(469): Waiting for Large Compaction Thread to finish... 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.CompactSplit(469): Waiting for Small Compaction Thread to finish... 
2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(3091): Received CLOSE for 1588230740 2024-11-24T02:55:50,653 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1321): Waiting on 1 regions to close 2024-11-24T02:55:50,653 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1325): Online Regions={1588230740=hbase:meta,,1.1588230740} 2024-11-24T02:55:50,653 DEBUG [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1351): Waiting on 1588230740 2024-11-24T02:55:50,654 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1722): Closing 1588230740, disabling compactions & flushes 2024-11-24T02:55:50,654 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1755): Closing region hbase:meta,,1.1588230740 2024-11-24T02:55:50,654 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1776): Time limited wait for close lock on hbase:meta,,1.1588230740 2024-11-24T02:55:50,654 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1843): Acquired close lock on hbase:meta,,1.1588230740 after waiting 0 ms 2024-11-24T02:55:50,654 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1853): Updates disabled for region hbase:meta,,1.1588230740 2024-11-24T02:55:50,654 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(2902): Flushing 1588230740 4/4 column families, dataSize=74 B heapSize=1.22 KB 2024-11-24T02:55:50,672 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/.tmp/ns/d56bcdade7844b049215759530db8b20 is 43, key is default/ns:d/1732416950577/Put/seqid=0 2024-11-24T02:55:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741837_1013 (size=5153) 2024-11-24T02:55:50,676 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741837_1013 (size=5153) 2024-11-24T02:55:50,677 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=74 B at sequenceid=6 (bloomFilter=true), to=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/.tmp/ns/d56bcdade7844b049215759530db8b20 2024-11-24T02:55:50,682 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/.tmp/ns/d56bcdade7844b049215759530db8b20 as hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/ns/d56bcdade7844b049215759530db8b20 2024-11-24T02:55:50,687 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/ns/d56bcdade7844b049215759530db8b20, entries=2, sequenceid=6, filesize=5.0 K 2024-11-24T02:55:50,688 INFO 
[RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(3140): Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false 2024-11-24T02:55:50,692 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] wal.WALSplitUtil(410): Wrote file=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/data/hbase/meta/1588230740/recovered.edits/9.seqid, newMaxSeqId=9, maxSeqId=1 2024-11-24T02:55:50,693 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] coprocessor.CoprocessorHost(310): Stop coprocessor org.apache.hadoop.hbase.coprocessor.MultiRowMutationEndpoint 2024-11-24T02:55:50,693 INFO [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1973): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:50,693 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] regionserver.HRegion(1676): Region close journal for 1588230740: Waiting for close lock at 1732416950653Running coprocessor pre-close hooks at 1732416950653Disabling compacts and flushes for region at 1732416950654 (+1 ms)Disabling writes for close at 1732416950654Obtaining lock to block concurrent updates at 1732416950654Preparing flush snapshotting stores in 1588230740 at 1732416950654Finished memstore snapshotting hbase:meta,,1.1588230740, syncing WAL and waiting on mvcc, flushsize=dataSize=74, getHeapSize=1184, getOffHeapSize=0, getCellsCount=2 at 1732416950654Flushing stores of hbase:meta,,1.1588230740 at 1732416950655 (+1 ms)Flushing 1588230740/ns: creating writer at 1732416950655Flushing 1588230740/ns: appending metadata at 1732416950672 (+17 ms)Flushing 1588230740/ns: closing flushed file at 1732416950672Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1014bffb: reopening flushed file at 1732416950681 (+9 ms)Finished flush of dataSize ~74 B/74, heapSize ~464 B/464, currentSize=0 B/0 for 1588230740 in 34ms, sequenceid=6, compaction requested=false at 1732416950688 (+7 ms)Writing region close event to WAL at 1732416950689 (+1 ms)Running coprocessor post-close hooks at 1732416950693 (+4 ms)Closed at 1732416950693 2024-11-24T02:55:50,693 DEBUG [RS_CLOSE_META-regionserver/7c69a60bd8f6:0-0 {event_type=M_RS_CLOSE_META}] handler.CloseRegionHandler(117): Closed hbase:meta,,1.1588230740 2024-11-24T02:55:50,854 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(976): stopping server 7c69a60bd8f6,35109,1732416949481; all regions closed. 
2024-11-24T02:55:50,854 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,854 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,854 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,855 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,855 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741834_1010 (size=1152) 2024-11-24T02:55:50,857 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741834_1010 (size=1152) 2024-11-24T02:55:50,860 DEBUG [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs 2024-11-24T02:55:50,860 INFO [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C35109%2C1732416949481.meta:.meta(num 1732416950486) 2024-11-24T02:55:50,861 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,861 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,861 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,861 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,861 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:50,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741833_1009 (size=93) 2024-11-24T02:55:50,863 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741833_1009 (size=93) 2024-11-24T02:55:50,865 DEBUG [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(1256): Moved 1 WAL file(s) to /user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/oldWALs 2024-11-24T02:55:50,865 INFO [RS:0;7c69a60bd8f6:35109 {}] wal.AbstractFSWAL(1259): Closed WAL: FSHLog 7c69a60bd8f6%2C35109%2C1732416949481:(num 1732416950092) 2024-11-24T02:55:50,865 DEBUG [RS:0;7c69a60bd8f6:35109 {}] ipc.AbstractRpcClient(514): Stopping rpc client 2024-11-24T02:55:50,865 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.LeaseManager(133): Closed leases 2024-11-24T02:55:50,865 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:55:50,865 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.ChoreService(370): Chore service for: regionserver/7c69a60bd8f6:0 had [ScheduledChore name=CompactionThroughputTuner, period=60000, unit=MILLISECONDS, ScheduledChore name=ReplicationSinkStatistics, period=300000, unit=MILLISECONDS, ScheduledChore name=ReplicationSourceStatistics, period=300000, unit=MILLISECONDS] on shutdown 2024-11-24T02:55:50,866 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:55:50,866 INFO [regionserver/7c69a60bd8f6:0.logRoller {}] wal.AbstractWALRoller(249): LogRoller exiting. 
2024-11-24T02:55:50,866 INFO [RS:0;7c69a60bd8f6:35109 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:35109 2024-11-24T02:55:50,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase/rs 2024-11-24T02:55:50,872 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:55:50,872 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/rs/7c69a60bd8f6,35109,1732416949481 2024-11-24T02:55:50,883 INFO [RegionServerTracker-0 {}] master.RegionServerTracker(171): RegionServer ephemeral node deleted, processing expiration [7c69a60bd8f6,35109,1732416949481] 2024-11-24T02:55:50,893 DEBUG [RegionServerTracker-0 {}] zookeeper.RecoverableZooKeeper(212): Node /hbase/draining/7c69a60bd8f6,35109,1732416949481 already deleted, retry=false 2024-11-24T02:55:50,893 INFO [RegionServerTracker-0 {}] master.ServerManager(688): Cluster shutdown set; 7c69a60bd8f6,35109,1732416949481 expired; onlineServers=0 2024-11-24T02:55:50,893 INFO [RegionServerTracker-0 {}] master.HMaster(3321): ***** STOPPING master '7c69a60bd8f6,39871,1732416949314' ***** 2024-11-24T02:55:50,893 INFO [RegionServerTracker-0 {}] master.HMaster(3323): STOPPED: Cluster shutdown set; onlineServer=0 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] hbase.HBaseServerBase(455): Close async cluster connection 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] hbase.HBaseServerBase(438): Shutdown chores and chore service 2024-11-24T02:55:50,894 DEBUG [M:0;7c69a60bd8f6:39871 {}] cleaner.LogCleaner(198): Cancelling LogCleaner 2024-11-24T02:55:50,894 WARN [OldWALsCleaner-0 {}] cleaner.LogCleaner(186): Interrupted while cleaning old WALs, will try to clean it next round. Exiting. 
2024-11-24T02:55:50,894 DEBUG [M:0;7c69a60bd8f6:39871 {}] cleaner.HFileCleaner(335): Stopping file delete threads 2024-11-24T02:55:50,894 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416949809 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.small.0-1732416949809,5,FailOnTimeoutGroup] 2024-11-24T02:55:50,894 DEBUG [master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416949808 {}] cleaner.HFileCleaner(306): Exit Thread[master/7c69a60bd8f6:0:becomeActiveMaster-HFileCleaner.large.0-1732416949808,5,FailOnTimeoutGroup] 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] hbase.ChoreService(370): Chore service for: master/7c69a60bd8f6:0 had [ScheduledChore name=FlushedSequenceIdFlusher, period=10800000, unit=MILLISECONDS] on shutdown 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] hbase.HBaseServerBase(448): Shutdown executor service 2024-11-24T02:55:50,894 DEBUG [M:0;7c69a60bd8f6:39871 {}] master.HMaster(1795): Stopping service threads 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] procedure2.RemoteProcedureDispatcher(119): Stopping procedure remote dispatcher 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] procedure2.ProcedureExecutor(723): Stopping 2024-11-24T02:55:50,894 INFO [M:0;7c69a60bd8f6:39871 {}] region.RegionProcedureStore(113): Stopping the Region Procedure Store, isAbort=false 2024-11-24T02:55:50,894 DEBUG [normalizer-worker-0 {}] normalizer.RegionNormalizerWorker(193): interrupt detected. terminating. 2024-11-24T02:55:50,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeDeleted, state=SyncConnected, path=/hbase/master 2024-11-24T02:55:50,904 DEBUG [M:0;7c69a60bd8f6:39871 {}] zookeeper.ZKUtil(347): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Unable to get data of znode /hbase/master because node does not exist (not an error) 2024-11-24T02:55:50,904 WARN [M:0;7c69a60bd8f6:39871 {}] master.ActiveMasterManager(344): Failed get of master address: java.io.IOException: Can't get master address from ZooKeeper; znode data == null 2024-11-24T02:55:50,904 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=NodeChildrenChanged, state=SyncConnected, path=/hbase 2024-11-24T02:55:50,905 INFO [M:0;7c69a60bd8f6:39871 {}] master.ServerManager(1139): Writing .lastflushedseqids file at: hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/.lastflushedseqids 2024-11-24T02:55:50,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741838_1014 (size=99) 2024-11-24T02:55:50,910 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741838_1014 (size=99) 2024-11-24T02:55:50,911 INFO [M:0;7c69a60bd8f6:39871 {}] assignment.AssignmentManager(395): Stopping assignment manager 2024-11-24T02:55:50,911 INFO [M:0;7c69a60bd8f6:39871 {}] region.MasterRegion(195): Closing local region {ENCODED => 1595e783b53d99cd5eef43b6debb2682, NAME => 'master:store,,1.1595e783b53d99cd5eef43b6debb2682.', STARTKEY => '', ENDKEY => ''}, isAbort=false 2024-11-24T02:55:50,911 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1722): Closing 
1595e783b53d99cd5eef43b6debb2682, disabling compactions & flushes 2024-11-24T02:55:50,911 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1755): Closing region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:50,911 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1776): Time limited wait for close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:50,911 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1843): Acquired close lock on master:store,,1.1595e783b53d99cd5eef43b6debb2682. after waiting 0 ms 2024-11-24T02:55:50,911 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1853): Updates disabled for region master:store,,1.1595e783b53d99cd5eef43b6debb2682. 2024-11-24T02:55:50,911 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(2902): Flushing 1595e783b53d99cd5eef43b6debb2682 4/4 column families, dataSize=7.67 KB heapSize=11.34 KB 2024-11-24T02:55:50,929 DEBUG [M:0;7c69a60bd8f6:39871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/023506f931674affbe409e7a5a7f4040 is 82, key is hbase:meta,,1/info:regioninfo/1732416950531/Put/seqid=0 2024-11-24T02:55:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741839_1015 (size=5672) 2024-11-24T02:55:50,933 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741839_1015 (size=5672) 2024-11-24T02:55:50,934 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=504 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/023506f931674affbe409e7a5a7f4040 2024-11-24T02:55:50,953 DEBUG [M:0;7c69a60bd8f6:39871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/741163d55d354f6e8d3ac54cccbe8d3c is 240, key is \x00\x00\x00\x00\x00\x00\x00\x01/proc:d/1732416950581/Put/seqid=0 2024-11-24T02:55:50,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741840_1016 (size=5275) 2024-11-24T02:55:50,957 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741840_1016 (size=5275) 2024-11-24T02:55:50,958 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=7.06 KB at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/741163d55d354f6e8d3ac54cccbe8d3c 2024-11-24T02:55:50,975 DEBUG [M:0;7c69a60bd8f6:39871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/686d8ae0d73749cb801e36f679373d56 is 69, key is 7c69a60bd8f6,35109,1732416949481/rs:state/1732416949934/Put/seqid=0 2024-11-24T02:55:50,979 INFO [Block report processor {}] 
blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741841_1017 (size=5156) 2024-11-24T02:55:50,979 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741841_1017 (size=5156) 2024-11-24T02:55:50,981 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=65 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/686d8ae0d73749cb801e36f679373d56 2024-11-24T02:55:50,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:50,983 INFO [RS:0;7c69a60bd8f6:35109 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:55:50,983 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): regionserver:35109-0x1016ac4c7db0001, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:50,983 INFO [RS:0;7c69a60bd8f6:35109 {}] regionserver.HRegionServer(1031): Exiting; stopping=7c69a60bd8f6,35109,1732416949481; zookeeper connection closed. 2024-11-24T02:55:50,984 INFO [Shutdown of org.apache.hadoop.hbase.fs.HFileSystem@43c18401 {}] hbase.SingleProcessHBaseCluster$SingleFileSystemShutdownThread(211): Hook closing fs=org.apache.hadoop.hbase.fs.HFileSystem@43c18401 2024-11-24T02:55:50,984 INFO [Time-limited test {}] util.JVMClusterUtil(335): Shutdown of 1 master(s) and 1 regionserver(s) complete 2024-11-24T02:55:51,001 DEBUG [M:0;7c69a60bd8f6:39871 {}] hfile.HFileWriterImpl(814): Len of the biggest cell in hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c1f448688d841ad8a3d46f075e6dfa6 is 52, key is load_balancer_on/state:d/1732416950609/Put/seqid=0 2024-11-24T02:55:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741842_1018 (size=5056) 2024-11-24T02:55:51,005 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741842_1018 (size=5056) 2024-11-24T02:55:51,006 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.DefaultStoreFlusher(81): Flushed memstore data size=48 B at sequenceid=29 (bloomFilter=true), to=hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c1f448688d841ad8a3d46f075e6dfa6 2024-11-24T02:55:51,010 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/info/023506f931674affbe409e7a5a7f4040 as hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/023506f931674affbe409e7a5a7f4040 2024-11-24T02:55:51,014 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added 
hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/info/023506f931674affbe409e7a5a7f4040, entries=8, sequenceid=29, filesize=5.5 K 2024-11-24T02:55:51,015 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/proc/741163d55d354f6e8d3ac54cccbe8d3c as hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/741163d55d354f6e8d3ac54cccbe8d3c 2024-11-24T02:55:51,019 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/proc/741163d55d354f6e8d3ac54cccbe8d3c, entries=3, sequenceid=29, filesize=5.2 K 2024-11-24T02:55:51,020 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/rs/686d8ae0d73749cb801e36f679373d56 as hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/686d8ae0d73749cb801e36f679373d56 2024-11-24T02:55:51,025 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/rs/686d8ae0d73749cb801e36f679373d56, entries=1, sequenceid=29, filesize=5.0 K 2024-11-24T02:55:51,026 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegionFileSystem(442): Committing hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/.tmp/state/3c1f448688d841ad8a3d46f075e6dfa6 as hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c1f448688d841ad8a3d46f075e6dfa6 2024-11-24T02:55:51,030 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HStore$StoreFlusherImpl(1990): Added hdfs://localhost:36367/user/jenkins/test-data/63402865-ccfb-e1ee-8319-ed530b468e2f/MasterData/data/master/store/1595e783b53d99cd5eef43b6debb2682/state/3c1f448688d841ad8a3d46f075e6dfa6, entries=1, sequenceid=29, filesize=4.9 K 2024-11-24T02:55:51,031 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(3140): Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=29, compaction requested=false 2024-11-24T02:55:51,033 INFO [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1973): Closed master:store,,1.1595e783b53d99cd5eef43b6debb2682. 
2024-11-24T02:55:51,033 DEBUG [M:0;7c69a60bd8f6:39871 {}] regionserver.HRegion(1676): Region close journal for 1595e783b53d99cd5eef43b6debb2682: Waiting for close lock at 1732416950911Disabling compacts and flushes for region at 1732416950911Disabling writes for close at 1732416950911Obtaining lock to block concurrent updates at 1732416950911Preparing flush snapshotting stores in 1595e783b53d99cd5eef43b6debb2682 at 1732416950911Finished memstore snapshotting master:store,,1.1595e783b53d99cd5eef43b6debb2682., syncing WAL and waiting on mvcc, flushsize=dataSize=7850, getHeapSize=11544, getOffHeapSize=0, getCellsCount=36 at 1732416950912 (+1 ms)Flushing stores of master:store,,1.1595e783b53d99cd5eef43b6debb2682. at 1732416950912Flushing 1595e783b53d99cd5eef43b6debb2682/info: creating writer at 1732416950912Flushing 1595e783b53d99cd5eef43b6debb2682/info: appending metadata at 1732416950929 (+17 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/info: closing flushed file at 1732416950929Flushing 1595e783b53d99cd5eef43b6debb2682/proc: creating writer at 1732416950938 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: appending metadata at 1732416950953 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/proc: closing flushed file at 1732416950953Flushing 1595e783b53d99cd5eef43b6debb2682/rs: creating writer at 1732416950962 (+9 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: appending metadata at 1732416950975 (+13 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/rs: closing flushed file at 1732416950975Flushing 1595e783b53d99cd5eef43b6debb2682/state: creating writer at 1732416950985 (+10 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: appending metadata at 1732416951000 (+15 ms)Flushing 1595e783b53d99cd5eef43b6debb2682/state: closing flushed file at 1732416951000Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@4072455c: reopening flushed file at 1732416951009 (+9 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@300f28ac: reopening flushed file at 1732416951014 (+5 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@1ce5f8b9: reopening flushed file at 1732416951020 (+6 ms)Flushing org.apache.hadoop.hbase.regionserver.HStore$StoreFlusherImpl@6a4d1ae3: reopening flushed file at 1732416951025 (+5 ms)Finished flush of dataSize ~7.67 KB/7850, heapSize ~11.27 KB/11544, currentSize=0 B/0 for 1595e783b53d99cd5eef43b6debb2682 in 120ms, sequenceid=29, compaction requested=false at 1732416951031 (+6 ms)Writing region close event to WAL at 1732416951033 (+2 ms)Closed at 1732416951033 2024-11-24T02:55:51,033 INFO [sync.0 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:51,034 INFO [sync.1 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:51,034 INFO [sync.2 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:51,034 INFO [sync.3 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:51,034 INFO [sync.4 {}] wal.FSHLog$SyncRunner(477): interrupted 2024-11-24T02:55:51,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:36785 is added to blk_1073741830_1006 (size=10311) 2024-11-24T02:55:51,036 INFO [Block report processor {}] blockmanagement.BlockManager(3777): BLOCK* addStoredBlock: 127.0.0.1:40949 is added to blk_1073741830_1006 (size=10311) 2024-11-24T02:55:51,037 INFO [M:0;7c69a60bd8f6:39871 {}] flush.MasterFlushTableProcedureManager(90): stop: server shutting down. 
2024-11-24T02:55:51,037 INFO [master:store-WAL-Roller {}] wal.AbstractWALRoller(249): LogRoller exiting. 2024-11-24T02:55:51,037 INFO [M:0;7c69a60bd8f6:39871 {}] ipc.NettyRpcServer(345): Stopping server on /172.17.0.2:39871 2024-11-24T02:55:51,037 INFO [M:0;7c69a60bd8f6:39871 {}] hbase.HBaseServerBase(479): Close zookeeper 2024-11-24T02:55:51,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:51,141 INFO [M:0;7c69a60bd8f6:39871 {}] hbase.HBaseServerBase(486): Close table descriptors 2024-11-24T02:55:51,141 DEBUG [Time-limited test-EventThread {}] zookeeper.ZKWatcher(609): master:39871-0x1016ac4c7db0000, quorum=127.0.0.1:62587, baseZNode=/hbase Received ZooKeeper Event, type=None, state=Closed, path=null 2024-11-24T02:55:51,143 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@14628127{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:51,144 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@5982a43f{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:51,144 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:51,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@20dd8a9c{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:51,144 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2f95849a{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:51,145 WARN [BP-1833539678-172.17.0.2-1732416946809 heartbeating to localhost/127.0.0.1:36367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:55:51,146 WARN [BP-1833539678-172.17.0.2-1732416946809 heartbeating to localhost/127.0.0.1:36367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833539678-172.17.0.2-1732416946809 (Datanode Uuid 5635e1f5-edd9-4d69-89b4-4dd458560d1b) service to localhost/127.0.0.1:36367 2024-11-24T02:55:51,146 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:55:51,146 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:55:51,146 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data3/current/BP-1833539678-172.17.0.2-1732416946809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:51,147 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data4/current/BP-1833539678-172.17.0.2-1732416946809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:51,147 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:55:51,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@4bb82bb7{datanode,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/datanode} 2024-11-24T02:55:51,149 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@6a35d82b{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:51,149 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:51,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@5ec9da7e{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:51,149 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@1f1185ee{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:51,150 WARN [BP-1833539678-172.17.0.2-1732416946809 heartbeating to localhost/127.0.0.1:36367 {}] datanode.IncrementalBlockReportManager(160): IncrementalBlockReportManager interrupted 2024-11-24T02:55:51,150 WARN [BP-1833539678-172.17.0.2-1732416946809 heartbeating to localhost/127.0.0.1:36367 {}] datanode.BPServiceActor(925): Ending block pool service for: Block pool BP-1833539678-172.17.0.2-1732416946809 (Datanode Uuid 3589e288-70c8-49fa-baa9-6675a4dc530f) service to localhost/127.0.0.1:36367 2024-11-24T02:55:51,150 ERROR [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1416): Command processor encountered interrupt and exit. 
2024-11-24T02:55:51,150 WARN [Command processor {}] datanode.BPServiceActor$CommandProcessingThread(1400): Ending command processor service for: Thread[Command processor,5,FailOnTimeoutGroup] 2024-11-24T02:55:51,151 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data1/current/BP-1833539678-172.17.0.2-1732416946809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:51,151 WARN [refreshUsed-/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/cluster_748d703e-3328-2e80-fb2c-cb67d9fcd60c/data/data2/current/BP-1833539678-172.17.0.2-1732416946809 {}] fs.CachingGetSpaceUsed$RefreshThread(231): Thread Interrupted waiting to refresh disk information: sleep interrupted 2024-11-24T02:55:51,151 WARN [Time-limited test {}] datanode.DataSetLockManager(261): not open lock leak check func 2024-11-24T02:55:51,156 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.w.WebAppContext@1e56b299{hdfs,/,null,STOPPED}{jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/hdfs} 2024-11-24T02:55:51,157 INFO [Time-limited test {}] server.AbstractConnector(383): Stopped ServerConnector@223a801d{HTTP/1.1, (http/1.1)}{localhost:0} 2024-11-24T02:55:51,157 INFO [Time-limited test {}] session.HouseKeeper(149): node0 Stopped scavenging 2024-11-24T02:55:51,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@61266769{static,/static,jar:file:/home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/local-repository/org/apache/hadoop/hadoop-hdfs/3.4.1/hadoop-hdfs-3.4.1-tests.jar!/webapps/static,STOPPED} 2024-11-24T02:55:51,157 INFO [Time-limited test {}] handler.ContextHandler(1159): Stopped o.e.j.s.ServletContextHandler@2482618b{logs,/logs,file:///home/jenkins/jenkins-home/workspace/HBase-Flaky-Tests_master/hbase-server/target/test-data/b6143e94-cb5c-0f1e-a547-abd3d5864e5b/hadoop.log.dir/,STOPPED} 2024-11-24T02:55:51,162 INFO [Time-limited test {}] zookeeper.MiniZooKeeperCluster(347): Shutdown MiniZK cluster with all ZK servers 2024-11-24T02:55:51,180 INFO [Time-limited test {}] hbase.HBaseTestingUtil(1026): Minicluster is down 2024-11-24T02:55:51,188 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:51,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:51,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:51,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. 
Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:51,189 INFO [Time-limited test {}] hbase.ResourceChecker(175): after: regionserver.wal.TestLogRolling#testLogRollOnNothingWritten Thread=268 (was 230) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36367 from jenkins.hfs.7 java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36367 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-45-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins@localhost:36367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-2 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) 
app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-45-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-43-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Client (75657370) connection to localhost/127.0.0.1:36367 from jenkins java.base@17.0.11/java.lang.Object.wait(Native Method) app//org.apache.hadoop.ipc.Client$Connection.waitForWork(Client.java:1042) app//org.apache.hadoop.ipc.Client$Connection.run(Client.java:1093) Potentially hanging thread: nioEventLoopGroup-43-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: LeaseRenewer:jenkins.hfs.7@localhost:36367 java.base@17.0.11/java.lang.Thread.sleep(Native Method) 
app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.run(LeaseRenewer.java:441) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer.access$800(LeaseRenewer.java:77) app//org.apache.hadoop.hdfs.client.impl.LeaseRenewer$1.run(LeaseRenewer.java:336) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-3 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: MiniHBaseClusterRegionServer-EventLoopGroup-17-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: HMaster-EventLoopGroup-16-1 app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native Method) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:220) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.Native.epollWait(Native.java:213) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.epollWaitNoTimerChange(EpollEventLoop.java:308) app//org.apache.hbase.thirdparty.io.netty.channel.epoll.EpollEventLoop.run(EpollEventLoop.java:365) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:997) 
app//org.apache.hbase.thirdparty.io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//org.apache.hbase.thirdparty.io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-1 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-44-2 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: nioEventLoopGroup-42-3 java.base@17.0.11/java.lang.Thread.sleep(Native Method) app//io.netty.util.concurrent.SingleThreadEventExecutor.confirmShutdown(SingleThreadEventExecutor.java:787) app//io.netty.channel.nio.NioEventLoop.run(NioEventLoop.java:596) app//io.netty.util.concurrent.SingleThreadEventExecutor$4.run(SingleThreadEventExecutor.java:994) app//io.netty.util.internal.ThreadExecutorMap$2.run(ThreadExecutorMap.java:74) app//io.netty.util.concurrent.FastThreadLocalRunnable.run(FastThreadLocalRunnable.java:30) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) Potentially hanging thread: IPC Parameter Sending Thread for localhost/127.0.0.1:36367 java.base@17.0.11/jdk.internal.misc.Unsafe.park(Native Method) java.base@17.0.11/java.util.concurrent.locks.LockSupport.parkNanos(LockSupport.java:252) java.base@17.0.11/java.util.concurrent.SynchronousQueue$TransferQueue.transfer(SynchronousQueue.java:704) java.base@17.0.11/java.util.concurrent.SynchronousQueue.poll(SynchronousQueue.java:903) app//org.apache.hadoop.ipc.Client$Connection$RpcRequestSender.run(Client.java:1121) java.base@17.0.11/java.lang.Thread.run(Thread.java:840) - Thread LEAK? -, OpenFileDescriptor=534 (was 517) - OpenFileDescriptor LEAK? -, MaxFileDescriptor=1048576 (was 1048576), SystemLoadAverage=164 (was 164), ProcessCount=11 (was 11), AvailableMemoryMB=9493 (was 9504) 2024-11-24T02:55:51,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null 2024-11-24T02:55:51,189 WARN [HBase-Metrics2-1 {}] impl.FsDatasetImpl(779): Exception thrown while metric collection. Exception : Cannot invoke "java.util.Map.values()" because "this.executors" is null